linux/drivers/vhost/vhost.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
        "Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
        "Maximum number of iotlb entries. (default: 2048)");

enum {
        VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
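
/* With VIRTIO_RING_F_EVENT_IDX, used_event lives in guest memory right
 * after the avail ring and avail_event right after the used ring, so both
 * macros above reuse the out-of-bounds ring[num] slot to reach them.
 * A minimal sketch of the resulting layout (assuming num == 4):
 *
 *      struct vring_avail { flags; idx; ring[4]; used_event;  }
 *      struct vring_used  { flags; idx; ring[4]; avail_event; }
 */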

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
        vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
        vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
        vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        struct vhost_vring_state s;

        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof(s)))
                return -EFAULT;

        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
            s.num != VHOST_VRING_BIG_ENDIAN)
                return -EINVAL;

        if (s.num == VHOST_VRING_BIG_ENDIAN)
                vhost_enable_cross_endian_big(vq);
        else
                vhost_enable_cross_endian_little(vq);

        return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        struct vhost_vring_state s = {
                .index = idx,
                .num = vq->user_be
        };

        if (copy_to_user(argp, &s, sizeof(s)))
                return -EFAULT;

        return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        /* Note for legacy virtio: user_be is initialized at reset time
         * according to the host endianness. If userspace does not set an
         * explicit endianness, the default behavior is native endian, as
         * expected by legacy virtio.
         */
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
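
/* Cross-endian legacy support lets a legacy guest run with a byte order
 * different from the host's: userspace picks the guest order per vring via
 * VHOST_SET_VRING_ENDIAN before a backend is attached (hence the -EBUSY
 * check above), and is_le is then derived from user_be instead of the
 * host's native order. */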

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
        vhost_init_is_le(vq);
}

struct vhost_flush_struct {
        struct vhost_work work;
        struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
        struct vhost_flush_struct *s;

        s = container_of(work, struct vhost_flush_struct, work);
        complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
        struct vhost_work *work = &poll->work;

        if (!(key_to_poll(key) & poll->mask))
                return 0;

        if (!poll->dev->use_worker)
                work->fn(work);
        else
                vhost_poll_queue(poll);

        return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
        clear_bit(VHOST_WORK_QUEUED, &work->flags);
        work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev)
{
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
        poll->wqh = NULL;

        vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
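
/* A minimal sketch of the poll lifecycle as a backend might use it
 * (names here are illustrative, not taken from a specific backend):
 *
 *      vhost_poll_init(&poll, handle_rx, EPOLLIN, dev);
 *      err = vhost_poll_start(&poll, sock_file);   // watch the fd
 *      ...
 *      vhost_poll_stop(&poll);                     // stop watching
 *      vhost_poll_flush(&poll);                    // wait for queued work
 *
 * On a matching event, vhost_poll_wakeup() either runs the work inline
 * (no worker) or queues it on the device worker thread. */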

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        __poll_t mask;

        if (poll->wqh)
                return 0;

        mask = vfs_poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        if (poll->wqh) {
                remove_wait_queue(poll->wqh, &poll->wait);
                poll->wqh = NULL;
        }
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_dev_flush(struct vhost_dev *dev)
{
        struct vhost_flush_struct flush;

        if (dev->worker) {
                init_completion(&flush.wait_event);
                vhost_work_init(&flush.work, vhost_flush_work);

                vhost_work_queue(dev, &flush.work);
                wait_for_completion(&flush.wait_event);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
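
/* Flushing works because the worker runs items in queueing order: once the
 * flush item's completion fires, every work item queued on this device
 * before the flush has finished running. */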

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        vhost_work_dev_flush(poll->dev);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
        if (!dev->worker)
                return;

        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->work_list);
                wake_up_process(dev->worker);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
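
/* Queueing is lockless: VHOST_WORK_QUEUED guarantees a work item sits on
 * the llist at most once, and the barrier implied by test_and_set_bit()
 * pairs with the clear_bit() the worker does before running the item, so
 * a re-queue issued from within the handler itself cannot be lost. */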

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
        return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
        vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
        int j;

        for (j = 0; j < VHOST_NUM_ADDRS; j++)
                vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
        call_ctx->ctx = NULL;
        memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
        return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);

static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->private_data = NULL;
        vq->acked_features = 0;
        vq->acked_backend_features = 0;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->kick = NULL;
        vq->log_ctx = NULL;
        vhost_disable_cross_endian(vq);
        vhost_reset_is_le(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
        vhost_vring_call_reset(&vq->call_ctx);
        __vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;

        kthread_use_mm(dev->mm);

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                node = llist_del_all(&dev->work_list);
                if (!node)
                        schedule();

                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);
                        kcov_remote_start_common(dev->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();
                        if (need_resched())
                                schedule();
                }
        }
        kthread_unuse_mm(dev->mm);
        return 0;
}
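
/* The sleep above is race free: set_current_state() happens before the
 * llist_del_all() check, and vhost_work_queue() adds to the list before
 * wake_up_process(), so a wakeup arriving between the check and schedule()
 * makes schedule() return immediately instead of sleeping. Note also that
 * llist_add() pushes at the head, so llist_reverse_order() is what
 * restores FIFO execution order. */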

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
        kfree(vq->indirect);
        vq->indirect = NULL;
        kfree(vq->log);
        vq->log = NULL;
        kfree(vq->heads);
        vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
        struct vhost_virtqueue *vq;
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
                vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
                vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
        }
        return 0;

err_nomem:
        for (; i >= 0; --i)
                vhost_vq_free_iovecs(dev->vqs[i]);
        return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
                          int pkts, int total_len)
{
        struct vhost_dev *dev = vq->dev;

        if ((dev->byte_weight && total_len >= dev->byte_weight) ||
            pkts >= dev->weight) {
                vhost_poll_queue(&vq->poll);
                return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
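
/* A minimal sketch of how a backend's handler loop might bound its work
 * with vhost_exceeds_weight() (illustrative only; process_one_buffer is a
 * hypothetical helper, not part of this file):
 *
 *      static void handle_tx_sketch(struct vhost_virtqueue *vq)
 *      {
 *              int pkts = 0, total_len = 0, len;
 *
 *              do {
 *                      len = process_one_buffer(vq);
 *                      if (len < 0)
 *                              break;
 *                      total_len += len;
 *              } while (!vhost_exceeds_weight(vq, ++pkts, total_len));
 *      }
 *
 * When the weight is exceeded, the vq is requeued on the worker, so other
 * virtqueues of the device get a turn instead of being starved. */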

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
                                   unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->avail) +
               sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->used) +
               sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        return sizeof(*vq->desc) * num;
}
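
/* Worked example, assuming num == 256 and VIRTIO_RING_F_EVENT_IDX set:
 *
 *   avail = 4 (flags + idx) + 2 * 256 (ring) + 2 (used_event)  =  518 bytes
 *   used  = 4 (flags + idx) + 8 * 256 (ring) + 2 (avail_event) = 2054 bytes
 *   desc  = 16 * 256                                           = 4096 bytes
 */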

void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
                    bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg))
{
        struct vhost_virtqueue *vq;
        int i;

        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->umem = NULL;
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
        dev->iov_limit = iov_limit;
        dev->weight = weight;
        dev->byte_weight = byte_weight;
        dev->use_worker = use_worker;
        dev->msg_handler = msg_handler;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->log = NULL;
                vq->indirect = NULL;
                vq->heads = NULL;
                vq->dev = dev;
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
                                        EPOLLIN, dev);
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;
        int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_dev_flush(dev);
        return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
        return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
        /* No owner, become one */
        if (dev->use_worker) {
                dev->mm = get_task_mm(current);
        } else {
                /* vDPA device does not use worker thread, so there's
                 * no need to hold the address space for mm. This helps
                 * to avoid deadlock in the case of mmap(), which may
                 * hold the refcnt of the file and depend on the release
                 * method to remove the vma.
                 */
                dev->mm = current->mm;
                mmgrab(dev->mm);
        }
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
        if (!dev->mm)
                return;

        if (dev->use_worker)
                mmput(dev->mm);
        else
                mmdrop(dev->mm);

        dev->mm = NULL;
}

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
        struct task_struct *worker;
        int err;

        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }

        vhost_attach_mm(dev);

        dev->kcov_handle = kcov_common_handle();
        if (dev->use_worker) {
                worker = kthread_create(vhost_worker, dev,
                                        "vhost-%d", current->pid);
                if (IS_ERR(worker)) {
                        err = PTR_ERR(worker);
                        goto err_worker;
                }

                dev->worker = worker;
                wake_up_process(worker); /* avoid contributing to loadavg */

                err = vhost_attach_cgroups(dev);
                if (err)
                        goto err_cgroup;
        }

        err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;

        return 0;
err_cgroup:
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
err_worker:
        vhost_detach_mm(dev);
        dev->kcov_handle = 0;
err_mm:
        return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
        return vhost_iotlb_alloc(max_iotlb_entries,
                                 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
        return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
        int i;

        vhost_dev_cleanup(dev);

        dev->umem = umem;
        /* We don't need VQ locks below since vhost_dev_cleanup makes sure
         * VQs aren't running.
         */
        for (i = 0; i < dev->nvqs; ++i)
                dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                        vhost_poll_stop(&dev->vqs[i]->poll);
                        vhost_poll_flush(&dev->vqs[i]->poll);
                }
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&dev->iotlb_lock);

        list_for_each_entry_safe(node, n, &dev->read_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        list_for_each_entry_safe(node, n, &dev->pending_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->error_ctx)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
                if (dev->vqs[i]->call_ctx.ctx)
                        eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
                vhost_vq_reset(dev, dev->vqs[i]);
        }
        vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        /* No one will access memory at this point */
        vhost_iotlb_free(dev->umem);
        dev->umem = NULL;
        vhost_iotlb_free(dev->iotlb);
        dev->iotlb = NULL;
        vhost_clear_msg(dev);
        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
                dev->kcov_handle = 0;
        }
        vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return false;

        return access_ok(log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
        if (uaddr > ULONG_MAX || size > ULONG_MAX)
                return true;

        if (!size)
                return false;

        return uaddr > ULONG_MAX - size + 1;
}
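
/* The dirty log is a bitmap with one bit per VHOST_PAGE_SIZE page:
 * addr / VHOST_PAGE_SIZE / 8 is the byte offset of the bit for addr, and
 * the second argument of access_ok() above rounds a region of sz bytes up
 * to the number of bitmap bytes that cover it. For example, with 4K pages,
 * addr = 1 MiB maps to bit 256, i.e. byte 32 of the bitmap. */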

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
                                int log_all)
{
        struct vhost_iotlb_map *map;

        if (!umem)
                return false;

        list_for_each_entry(map, &umem->list, link) {
                unsigned long a = map->addr;

                if (vhost_overflow(map->addr, map->size))
                        return false;

                if (!access_ok((void __user *)a, map->size))
                        return false;
                else if (log_all && !log_access_ok(log_base,
                                                   map->start,
                                                   map->size))
                        return false;
        }
        return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
                                               u64 addr, unsigned int size,
                                               int type)
{
        const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

        if (!map)
                return NULL;

        return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
                             int log_all)
{
        int i;

        for (i = 0; i < d->nvqs; ++i) {
                bool ok;
                bool log;

                mutex_lock(&d->vqs[i]->mutex);
                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i]->private_data)
                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
                                                 umem, log);
                else
                        ok = true;
                mutex_unlock(&d->vqs[i]->mutex);
                if (!ok)
                        return false;
        }
        return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
                              const void *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_to_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that all vq
                 * memory can be accessed through the iotlb, so
                 * -EAGAIN should not happen in this case.
                 */
                struct iov_iter t;
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)to, size,
                                     VHOST_ADDR_USED);

                if (uaddr)
                        return __copy_to_user(uaddr, from, size);

                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_WO);
                if (ret < 0)
                        goto out;
                iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
                ret = copy_to_iter(from, size, &t);
                if (ret == size)
                        ret = 0;
        }
out:
        return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
                                void __user *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_from_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that the vq
                 * can be accessed through the iotlb, so -EAGAIN
                 * should not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)from, size,
                                     VHOST_ADDR_DESC);
                struct iov_iter f;

                if (uaddr)
                        return __copy_from_user(to, uaddr, size);

                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_RO);
                if (ret < 0) {
                        vq_err(vq, "IOTLB translation failure: uaddr "
                               "%p size 0x%llx\n", from,
                               (unsigned long long) size);
                        goto out;
                }
                iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
                ret = copy_from_iter(to, size, &f);
                if (ret == size)
                        ret = 0;
        }

out:
        return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
                                          void __user *addr, unsigned int size,
                                          int type)
{
        int ret;

        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                             VHOST_ACCESS_RO);
        if (ret < 0) {
                vq_err(vq, "IOTLB translation failure: uaddr "
                        "%p size 0x%llx\n", addr,
                        (unsigned long long) size);
                return NULL;
        }

        if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
                vq_err(vq, "Non atomic userspace memory access: uaddr "
                        "%p size 0x%llx\n", addr,
                        (unsigned long long) size);
                return NULL;
        }

        return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * can be accessed through the iotlb, so -EAGAIN
 * should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
                                            void __user *addr, unsigned int size,
                                            int type)
{
        void __user *uaddr = vhost_vq_meta_fetch(vq,
                             (u64)(uintptr_t)addr, size, type);
        if (uaddr)
                return uaddr;

        return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)              \
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
                __typeof__(ptr) to = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
                                          sizeof(*ptr), VHOST_ADDR_USED); \
                if (to != NULL) \
                        ret = __put_user(x, to); \
                else \
                        ret = -EFAULT;  \
        } \
        ret; \
})
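
/* Without an iotlb, ptr is already a host user address and __put_user()
 * hits it directly. With an iotlb, the GIOVA in ptr is first translated:
 * the meta_iotlb cache makes the common case a single pointer computation,
 * and only a cache miss pays for the full interval-tree lookup in
 * __vhost_get_user_slow(). The vhost_get_user() macro below mirrors this
 * for reads. */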

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                              vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
                                 struct vring_used_elem *head, int idx,
                                 int count)
{
        return vhost_copy_to_user(vq, vq->used->ring + idx, head,
                                  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                              &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                              &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)                \
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __get_user(x, ptr); \
        } else { \
                __typeof__(ptr) from = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                                           sizeof(*ptr), \
                                                           type); \
                if (from != NULL) \
                        ret = __get_user(x, from); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

#define vhost_get_avail(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
                mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
                mutex_unlock(&d->vqs[i]->mutex);
}
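
/* mutex_lock_nested() with the vq index as the lockdep subclass records
 * that the per-vq mutexes are always taken here in ascending order, so
 * holding several of them at once is not flagged as a deadlock. */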

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                      __virtio16 *idx)
{
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
                                       __virtio16 *head, int idx)
{
        return vhost_get_avail(vq, *head,
                               &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
                                        __virtio16 *flags)
{
        return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
                                       __virtio16 *event)
{
        return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
                                     __virtio16 *idx)
{
        return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
                                 struct vring_desc *desc, int idx)
{
        return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
                        kfree(node);
                }
        }

        spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
        unsigned long a = uaddr;

        /* Make sure 64 bit math will not overflow. */
        if (vhost_overflow(uaddr, size))
                return false;

        if ((access & VHOST_ACCESS_RO) &&
            !access_ok((void __user *)a, size))
                return false;
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok((void __user *)a, size))
                return false;
        return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *msg)
{
        int ret = 0;

        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
                                          msg->iova + msg->size - 1,
                                          msg->uaddr, msg->perm)) {
                        ret = -ENOMEM;
                        break;
                }
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                vhost_iotlb_del_range(dev->iotlb, msg->iova,
                                      msg->iova + msg->size - 1);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);

        return ret;
}
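
/* The full miss/update round trip, roughly: a handler fails to translate
 * a GIOVA and calls vhost_iotlb_miss(), which queues a VHOST_IOTLB_MISS
 * message on read_list; userspace reads it from the char device, which
 * moves the node to pending_list; once userspace writes back a
 * VHOST_IOTLB_UPDATE covering that iova, vhost_iotlb_notify_vq() above
 * finds the pending node and requeues the vq so the handler can retry
 * the translation. */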

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                             struct iov_iter *from)
{
        struct vhost_iotlb_msg msg;
        size_t offset;
        int type, ret;

        ret = copy_from_iter(&type, sizeof(type), from);
        if (ret != sizeof(type)) {
                ret = -EINVAL;
                goto done;
        }

        switch (type) {
        case VHOST_IOTLB_MSG:
                /* There may be a hole after the type field for the V1
                 * message format, so skip it here.
                 */
                offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
                break;
        case VHOST_IOTLB_MSG_V2:
                offset = sizeof(__u32);
                break;
        default:
                ret = -EINVAL;
                goto done;
        }

        iov_iter_advance(from, offset);
        ret = copy_from_iter(&msg, sizeof(msg), from);
        if (ret != sizeof(msg)) {
                ret = -EINVAL;
                goto done;
        }

        if (dev->msg_handler)
                ret = dev->msg_handler(dev, &msg);
        else
                ret = vhost_process_iotlb_msg(dev, &msg);
        if (ret) {
                ret = -EFAULT;
                goto done;
        }

        ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
              sizeof(struct vhost_msg_v2);
done:
        return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                            poll_table *wait)
{
        __poll_t mask = 0;

        poll_wait(file, &dev->wait, wait);

        if (!list_empty(&dev->read_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
                            int noblock)
{
        DEFINE_WAIT(wait);
        struct vhost_msg_node *node;
        ssize_t ret = 0;
        unsigned size = sizeof(struct vhost_msg);

        if (iov_iter_count(to) < size)
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(&dev->wait, &wait,
                                        TASK_INTERRUPTIBLE);

                node = vhost_dequeue_msg(dev, &dev->read_list);
                if (node)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (!dev->iotlb) {
                        ret = -EBADFD;
                        break;
                }

                schedule();
        }

        if (!noblock)
                finish_wait(&dev->wait, &wait);

        if (node) {
                struct vhost_iotlb_msg *msg;
                void *start = &node->msg;

                switch (node->msg.type) {
                case VHOST_IOTLB_MSG:
                        size = sizeof(node->msg);
                        msg = &node->msg.iotlb;
                        break;
                case VHOST_IOTLB_MSG_V2:
                        size = sizeof(node->msg_v2);
                        msg = &node->msg_v2.iotlb;
                        break;
                default:
                        BUG();
                        break;
                }

                ret = copy_to_iter(start, size, to);
                if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                        kfree(node);
                        return ret;
                }
                vhost_enqueue_msg(dev, &dev->pending_list, node);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
        struct vhost_dev *dev = vq->dev;
        struct vhost_msg_node *node;
        struct vhost_iotlb_msg *msg;
        bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

        node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
        if (!node)
                return -ENOMEM;

        if (v2) {
                node->msg_v2.type = VHOST_IOTLB_MSG_V2;
                msg = &node->msg_v2.iotlb;
        } else {
                msg = &node->msg.iotlb;
        }

        msg->type = VHOST_IOTLB_MISS;
        msg->iova = iova;
        msg->perm = access;

        vhost_enqueue_msg(dev, &dev->read_list, node);

        return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         vring_desc_t __user *desc,
                         vring_avail_t __user *avail,
                         vring_used_t __user *used)
{
        /* If an IOTLB device is present, the vring addresses are
         * GIOVAs. Access validation occurs at prefetch time. */
        if (vq->iotlb)
                return true;

        return access_ok(desc, vhost_get_desc_size(vq, num)) &&
               access_ok(avail, vhost_get_avail_size(vq, num)) &&
               access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
                                 const struct vhost_iotlb_map *map,
                                 int type)
{
        int access = (type == VHOST_ADDR_USED) ?
                     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

        if (likely(map->perm & access))
                vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
                            int access, u64 addr, u64 len, int type)
{
        const struct vhost_iotlb_map *map;
        struct vhost_iotlb *umem = vq->iotlb;
        u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

        if (vhost_vq_meta_fetch(vq, addr, len, type))
                return true;

        while (len > s) {
                map = vhost_iotlb_itree_first(umem, addr, last);
                if (map == NULL || map->start > addr) {
                        vhost_iotlb_miss(vq, addr, access);
                        return false;
                } else if (!(map->perm & access)) {
                        /* Report the possible access violation by
                         * requesting another translation from userspace.
                         */
                        return false;
                }

                size = map->size - addr + map->start;

                if (orig_addr == addr && size >= len)
                        vhost_vq_meta_update(vq, map, type);

                s += size;
                addr += size;
        }

        return true;
}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
        unsigned int num = vq->num;

        if (!vq->iotlb)
                return 1;

        return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
                               vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
               iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
                               vhost_get_avail_size(vq, num),
                               VHOST_ADDR_AVAIL) &&
               iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
                               vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
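
/* Backends call vq_meta_prefetch() at the start of a handler. A return of
 * 1 means all three vring areas translate and the meta_iotlb cache is
 * warm, so the fast paths in vhost_get_user()/vhost_put_user() apply; 0
 * means translation failed (typically a miss was queued) and the handler
 * should back off until userspace updates the IOTLB and the vq is
 * requeued. */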

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
        return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
                                  void __user *log_base,
                                  bool log_used,
                                  u64 log_addr)
{
        /* If an IOTLB device is present, log_addr is a GIOVA that
         * will never be logged by log_used(). */
        if (vq->iotlb)
                return true;

        return !log_used || log_access_ok(log_base, log_addr,
                                          vhost_get_used_size(vq, vq->num));
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
                             void __user *log_base)
{
        return vq_memory_access_ok(log_base, vq->umem,
                                   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
        if (!vq_log_access_ok(vq, vq->log_base))
                return false;

        return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
        struct vhost_memory mem, *newmem;
        struct vhost_memory_region *region;
        struct vhost_iotlb *newumem, *oldumem;
        unsigned long size = offsetof(struct vhost_memory, regions);
        int i;

        if (copy_from_user(&mem, m, size))
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
        if (mem.nregions > max_mem_regions)
                return -E2BIG;
        newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
                        GFP_KERNEL);
        if (!newmem)
                return -ENOMEM;

        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           flex_array_size(newmem, regions, mem.nregions))) {
                kvfree(newmem);
                return -EFAULT;
        }

        newumem = iotlb_alloc();
        if (!newumem) {
                kvfree(newmem);
                return -ENOMEM;
        }

        for (region = newmem->regions;
             region < newmem->regions + mem.nregions;
             region++) {
                if (vhost_iotlb_add_range(newumem,
                                          region->guest_phys_addr,
                                          region->guest_phys_addr +
                                          region->memory_size - 1,
                                          region->userspace_addr,
                                          VHOST_MAP_RW))
                        goto err;
        }

        if (!memory_access_ok(d, newumem, 0))
                goto err;

        oldumem = d->umem;
        d->umem = newumem;

        /* All memory accesses are done under some VQ mutex. */
        for (i = 0; i < d->nvqs; ++i) {
                mutex_lock(&d->vqs[i]->mutex);
                d->vqs[i]->umem = newumem;
                mutex_unlock(&d->vqs[i]->mutex);
        }

        kvfree(newmem);
        vhost_iotlb_free(oldumem);
        return 0;

err:
        vhost_iotlb_free(newumem);
        kvfree(newmem);
        return -EFAULT;
}
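
/* The table swap above is safe without stopping the rings: every access
 * to the memory table happens under some vq mutex, so taking and
 * releasing each vq mutex while installing newumem guarantees no vq still
 * sees oldumem by the time it is freed. */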

static long vhost_vring_set_num(struct vhost_dev *d,
                                struct vhost_virtqueue *vq,
                                void __user *argp)
{
        struct vhost_vring_state s;

        /* Resizing ring with an active backend?
         * You don't want to do that. */
        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof s))
                return -EFAULT;

        if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
                return -EINVAL;
        vq->num = s.num;

        return 0;
}
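
/* The check above enforces the split-ring rules: a non-zero size of at
 * most 65535 that is a power of two. (s.num & (s.num - 1)) == 0 exactly
 * when s.num is a power of two, e.g. 256 & 255 == 0 but 320 & 319 != 0,
 * which is what lets index masking with (vq->num - 1) work elsewhere. */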

static long vhost_vring_set_addr(struct vhost_dev *d,
                                 struct vhost_virtqueue *vq,
                                 void __user *argp)
{
        struct vhost_vring_addr a;

        if (copy_from_user(&a, argp, sizeof a))
                return -EFAULT;
        if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
                return -EOPNOTSUPP;

        /* For 32bit, verify that the top 32bits of the user
           data are set to zero. */
        if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
            (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
            (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
                return -EFAULT;

        /* Make sure it's safe to cast pointers to vring types. */
        BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
        BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
        if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
            (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
            (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
                return -EINVAL;

        /* We only verify access here if backend is configured.
         * If it is not, we don't as size might not have been setup.
         * We will verify when backend is configured. */
        if (vq->private_data) {
                if (!vq_access_ok(vq, vq->num,
                        (void __user *)(unsigned long)a.desc_user_addr,
                        (void __user *)(unsigned long)a.avail_user_addr,
                        (void __user *)(unsigned long)a.used_user_addr))
                        return -EINVAL;

                /* Also validate log access for used ring if enabled. */
                if (!vq_log_used_access_ok(vq, vq->log_base,
                                a.flags & (0x1 << VHOST_VRING_F_LOG),
                                a.log_guest_addr))
                        return -EINVAL;
        }

        vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
        vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
        vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
        vq->log_addr = a.log_guest_addr;
        vq->used = (void __user *)(unsigned long)a.used_user_addr;

        return 0;
}

static long vhost_vring_set_num_addr(struct vhost_dev *d,
                                     struct vhost_virtqueue *vq,
                                     unsigned int ioctl,
                                     void __user *argp)
{
        long r;

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_NUM:
                r = vhost_vring_set_num(d, vq, argp);
                break;
        case VHOST_SET_VRING_ADDR:
                r = vhost_vring_set_addr(d, vq, argp);
                break;
        default:
                BUG();
        }

        mutex_unlock(&vq->mutex);

        return r;
}

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        u32 idx;
        long r;

        r = get_user(idx, idxp);
        if (r < 0)
                return r;
        if (idx >= d->nvqs)
                return -ENOBUFS;

        idx = array_index_nospec(idx, d->nvqs);
        vq = d->vqs[idx];

        if (ioctl == VHOST_SET_VRING_NUM ||
            ioctl == VHOST_SET_VRING_ADDR) {
                return vhost_vring_set_num_addr(d, vq, ioctl, argp);
        }

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                if (copy_from_user(&s, argp, sizeof s)) {
                        r = -EFAULT;
                        break;
                }
                if (s.num > 0xffff) {
                        r = -EINVAL;
                        break;
                }
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
                s.num = vq->last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
                        r = -EFAULT;
                break;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->kick) {
                        pollstop = (filep = vq->kick) != NULL;
                        pollstart = (vq->kick = eventfp) != NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_CALL:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }

                swap(ctx, vq->call_ctx.ctx);
                break;
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }
                swap(ctx, vq->error_ctx);
                break;
        case VHOST_SET_VRING_ENDIAN:
                r = vhost_set_vring_endian(vq, argp);
                break;
        case VHOST_GET_VRING_ENDIAN:
                r = vhost_get_vring_endian(vq, idx, argp);
                break;
        case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
                if (copy_from_user(&s, argp, sizeof(s))) {
                        r = -EFAULT;
                        break;
                }
                vq->busyloop_timeout = s.num;
                break;
        case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
                s.index = idx;
                s.num = vq->busyloop_timeout;
                if (copy_to_user(argp, &s, sizeof(s)))
                        r = -EFAULT;
                break;
        default:
                r = -ENOIOCTLCMD;
        }

        if (pollstop && vq->handle_kick)
                vhost_poll_stop(&vq->poll);

        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);
        if (filep)
                fput(filep);

        if (pollstart && vq->handle_kick
1710                r = vhost_poll_start(&vq->poll, vq->kick);
1711
1712        mutex_unlock(&vq->mutex);
1713
1714        if (pollstop && vq->handle_kick)
1715                vhost_poll_flush(&vq->poll);
1716        return r;
1717}
1718EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
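
/* Editorial sketch, not part of the original source: how a userspace backend
 * typically binds the kick and call eventfds handled by VHOST_SET_VRING_KICK
 * and VHOST_SET_VRING_CALL above. Passing fd == -1 (VHOST_FILE_UNBIND)
 * detaches a previously bound eventfd. The helper is illustrative only.
 *
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	static int bind_vq_eventfds(int vhost_fd, unsigned int idx,
 *				    int kick_fd, int call_fd)
 *	{
 *		struct vhost_vring_file f = { .index = idx, .fd = kick_fd };
 *
 *		if (ioctl(vhost_fd, VHOST_SET_VRING_KICK, &f) < 0)
 *			return -1;
 *		f.fd = call_fd;
 *		return ioctl(vhost_fd, VHOST_SET_VRING_CALL, &f);
 *	}
 *
 * A VMM such as QEMU then wires an ioeventfd to kick_fd so guest ring
 * notifications reach the worker, and an irqfd to call_fd so used-buffer
 * signals reach the guest.
 */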
1719
1720int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1721{
1722        struct vhost_iotlb *niotlb, *oiotlb;
1723        int i;
1724
1725        niotlb = iotlb_alloc();
1726        if (!niotlb)
1727                return -ENOMEM;
1728
1729        oiotlb = d->iotlb;
1730        d->iotlb = niotlb;
1731
1732        for (i = 0; i < d->nvqs; ++i) {
1733                struct vhost_virtqueue *vq = d->vqs[i];
1734
1735                mutex_lock(&vq->mutex);
1736                vq->iotlb = niotlb;
1737                __vhost_vq_meta_reset(vq);
1738                mutex_unlock(&vq->mutex);
1739        }
1740
1741        vhost_iotlb_free(oiotlb);
1742
1743        return 0;
1744}
1745EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1746
1747/* Caller must have device mutex */
1748long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1749{
1750        struct eventfd_ctx *ctx;
1751        u64 p;
1752        long r;
1753        int i, fd;
1754
1755        /* If you are not the owner, you can become one */
1756        if (ioctl == VHOST_SET_OWNER) {
1757                r = vhost_dev_set_owner(d);
1758                goto done;
1759        }
1760
1761        /* You must be the owner to do anything else */
1762        r = vhost_dev_check_owner(d);
1763        if (r)
1764                goto done;
1765
1766        switch (ioctl) {
1767        case VHOST_SET_MEM_TABLE:
1768                r = vhost_set_memory(d, argp);
1769                break;
1770        case VHOST_SET_LOG_BASE:
1771                if (copy_from_user(&p, argp, sizeof p)) {
1772                        r = -EFAULT;
1773                        break;
1774                }
1775                if ((u64)(unsigned long)p != p) {
1776                        r = -EFAULT;
1777                        break;
1778                }
1779                for (i = 0; i < d->nvqs; ++i) {
1780                        struct vhost_virtqueue *vq;
1781                        void __user *base = (void __user *)(unsigned long)p;
1782                        vq = d->vqs[i];
1783                        mutex_lock(&vq->mutex);
1784                        /* If ring is inactive, will check when it's enabled. */
1785                        if (vq->private_data && !vq_log_access_ok(vq, base))
1786                                r = -EFAULT;
1787                        else
1788                                vq->log_base = base;
1789                        mutex_unlock(&vq->mutex);
1790                }
1791                break;
1792        case VHOST_SET_LOG_FD:
1793                r = get_user(fd, (int __user *)argp);
1794                if (r < 0)
1795                        break;
1796                ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
1797                if (IS_ERR(ctx)) {
1798                        r = PTR_ERR(ctx);
1799                        break;
1800                }
1801                swap(ctx, d->log_ctx);
1802                for (i = 0; i < d->nvqs; ++i) {
1803                        mutex_lock(&d->vqs[i]->mutex);
1804                        d->vqs[i]->log_ctx = d->log_ctx;
1805                        mutex_unlock(&d->vqs[i]->mutex);
1806                }
1807                if (ctx)
1808                        eventfd_ctx_put(ctx);
1809                break;
1810        default:
1811                r = -ENOIOCTLCMD;
1812                break;
1813        }
1814done:
1815        return r;
1816}
1817EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
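
/* Editorial sketch, not part of the original source: a rough outline of how
 * live-migration code might arm the dirty log handled by VHOST_SET_LOG_BASE
 * above, assuming the usual 4K VHOST_PAGE_SIZE. The bitmap holds one bit per
 * 4K page of guest memory; sizing and allocation here are assumptions.
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	static int enable_dirty_log(int vhost_fd, uint64_t guest_mem_bytes,
 *				    void **bitmap_out)
 *	{
 *		uint64_t pages = guest_mem_bytes / 4096;
 *		void *bitmap = calloc((pages + 7) / 8, 1);
 *		uint64_t base;
 *
 *		if (!bitmap)
 *			return -1;
 *		base = (uint64_t)(uintptr_t)bitmap;
 *		if (ioctl(vhost_fd, VHOST_SET_LOG_BASE, &base) < 0) {
 *			free(bitmap);
 *			return -1;
 *		}
 *		*bitmap_out = bitmap;
 *		return 0;
 *	}
 */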
1818
1819/* TODO: This is really inefficient.  We need something like get_user()
1820 * (instruction directly accesses the data, with an exception table entry
1821 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1822 */
1823static int set_bit_to_user(int nr, void __user *addr)
1824{
1825        unsigned long log = (unsigned long)addr;
1826        struct page *page;
1827        void *base;
1828        int bit = nr + (log % PAGE_SIZE) * 8;
1829        int r;
1830
1831        r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
1832        if (r < 0)
1833                return r;
1834        BUG_ON(r != 1);
1835        base = kmap_atomic(page);
1836        set_bit(bit, base);
1837        kunmap_atomic(base);
1838        unpin_user_pages_dirty_lock(&page, 1, true);
1839        return 0;
1840}
1841
1842static int log_write(void __user *log_base,
1843                     u64 write_address, u64 write_length)
1844{
1845        u64 write_page = write_address / VHOST_PAGE_SIZE;
1846        int r;
1847
1848        if (!write_length)
1849                return 0;
1850        write_length += write_address % VHOST_PAGE_SIZE;
1851        for (;;) {
1852                u64 base = (u64)(unsigned long)log_base;
1853                u64 log = base + write_page / 8;
1854                int bit = write_page % 8;
1855                if ((u64)(unsigned long)log != log)
1856                        return -EFAULT;
1857                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1858                if (r < 0)
1859                        return r;
1860                if (write_length <= VHOST_PAGE_SIZE)
1861                        break;
1862                write_length -= VHOST_PAGE_SIZE;
1863                write_page += 1;
1864        }
1865        return r;
1866}
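
/* Worked example (editorial, not in the original source): for
 * write_address = 0x5004 and write_length = 0x2000, the write touches guest
 * pages 5, 6 and 7 (0x5004..0x7003). log_write() pads the length to 0x2004,
 * then loops:
 *
 *	page 5 -> byte log_base + 5/8 = log_base + 0, bit 5 % 8 = 5
 *	page 6 -> byte log_base + 0, bit 6
 *	page 7 -> byte log_base + 0, bit 7
 *
 * and set_bit_to_user() turns each (byte address, bit) pair back into a bit
 * offset within the pinned page: nr + (log % PAGE_SIZE) * 8.
 */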
1867
1868static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1869{
1870        struct vhost_iotlb *umem = vq->umem;
1871        struct vhost_iotlb_map *u;
1872        u64 start, end, l, min;
1873        int r;
1874        bool hit = false;
1875
1876        while (len) {
1877                min = len;
1878                /* More than one GPA can be mapped into a single HVA, so
1879                 * iterate over all possible umems here to be safe.
1880                 */
1881                list_for_each_entry(u, &umem->list, link) {
1882                        if (u->addr > hva - 1 + len ||
1883                            u->addr - 1 + u->size < hva)
1884                                continue;
1885                        start = max(u->addr, hva);
1886                        end = min(u->addr - 1 + u->size, hva - 1 + len);
1887                        l = end - start + 1;
1888                        r = log_write(vq->log_base,
1889                                      u->start + start - u->addr,
1890                                      l);
1891                        if (r < 0)
1892                                return r;
1893                        hit = true;
1894                        min = min(l, min);
1895                }
1896
1897                if (!hit)
1898                        return -EFAULT;
1899
1900                len -= min;
1901                hva += min;
1902        }
1903
1904        return 0;
1905}
1906
1907static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1908{
1909        struct iovec *iov = vq->log_iov;
1910        int i, ret;
1911
1912        if (!vq->iotlb)
1913                return log_write(vq->log_base, vq->log_addr + used_offset, len);
1914
1915        ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1916                             len, iov, 64, VHOST_ACCESS_WO);
1917        if (ret < 0)
1918                return ret;
1919
1920        for (i = 0; i < ret; i++) {
1921                ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1922                                    iov[i].iov_len);
1923                if (ret)
1924                        return ret;
1925        }
1926
1927        return 0;
1928}
1929
1930int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1931                    unsigned int log_num, u64 len, struct iovec *iov, int count)
1932{
1933        int i, r;
1934
1935        /* Make sure data written is seen before log. */
1936        smp_wmb();
1937
1938        if (vq->iotlb) {
1939                for (i = 0; i < count; i++) {
1940                        r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1941                                          iov[i].iov_len);
1942                        if (r < 0)
1943                                return r;
1944                }
1945                return 0;
1946        }
1947
1948        for (i = 0; i < log_num; ++i) {
1949                u64 l = min(log[i].len, len);
1950                r = log_write(vq->log_base, log[i].addr, l);
1951                if (r < 0)
1952                        return r;
1953                len -= l;
1954                if (!len) {
1955                        if (vq->log_ctx)
1956                                eventfd_signal(vq->log_ctx, 1);
1957                        return 0;
1958                }
1959        }
1960        /* Length written exceeds what we have stored. This is a bug. */
1961        BUG();
1962        return 0;
1963}
1964EXPORT_SYMBOL_GPL(vhost_log_write);
1965
1966static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1967{
1968        void __user *used;
1969        if (vhost_put_used_flags(vq))
1970                return -EFAULT;
1971        if (unlikely(vq->log_used)) {
1972                /* Make sure the flag is seen before log. */
1973                smp_wmb();
1974                /* Log used flag write. */
1975                used = &vq->used->flags;
1976                log_used(vq, (used - (void __user *)vq->used),
1977                         sizeof vq->used->flags);
1978                if (vq->log_ctx)
1979                        eventfd_signal(vq->log_ctx, 1);
1980        }
1981        return 0;
1982}
1983
1984static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1985{
1986        if (vhost_put_avail_event(vq))
1987                return -EFAULT;
1988        if (unlikely(vq->log_used)) {
1989                void __user *used;
1990                /* Make sure the event is seen before log. */
1991                smp_wmb();
1992                /* Log avail event write */
1993                used = vhost_avail_event(vq);
1994                log_used(vq, (used - (void __user *)vq->used),
1995                         sizeof *vhost_avail_event(vq));
1996                if (vq->log_ctx)
1997                        eventfd_signal(vq->log_ctx, 1);
1998        }
1999        return 0;
2000}
2001
2002int vhost_vq_init_access(struct vhost_virtqueue *vq)
2003{
2004        __virtio16 last_used_idx;
2005        int r;
2006        bool is_le = vq->is_le;
2007
2008        if (!vq->private_data)
2009                return 0;
2010
2011        vhost_init_is_le(vq);
2012
2013        r = vhost_update_used_flags(vq);
2014        if (r)
2015                goto err;
2016        vq->signalled_used_valid = false;
2017        if (!vq->iotlb &&
2018            !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2019                r = -EFAULT;
2020                goto err;
2021        }
2022        r = vhost_get_used_idx(vq, &last_used_idx);
2023        if (r) {
2024                vq_err(vq, "Can't access used idx at %p\n",
2025                       &vq->used->idx);
2026                goto err;
2027        }
2028        vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2029        return 0;
2030
2031err:
2032        vq->is_le = is_le;
2033        return r;
2034}
2035EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2036
2037static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2038                          struct iovec iov[], int iov_size, int access)
2039{
2040        const struct vhost_iotlb_map *map;
2041        struct vhost_dev *dev = vq->dev;
2042        struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2043        struct iovec *_iov;
2044        u64 s = 0;
2045        int ret = 0;
2046
2047        while ((u64)len > s) {
2048                u64 size;
2049                if (unlikely(ret >= iov_size)) {
2050                        ret = -ENOBUFS;
2051                        break;
2052                }
2053
2054                map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
2055                if (map == NULL || map->start > addr) {
2056                        if (umem != dev->iotlb) {
2057                                ret = -EFAULT;
2058                                break;
2059                        }
2060                        ret = -EAGAIN;
2061                        break;
2062                } else if (!(map->perm & access)) {
2063                        ret = -EPERM;
2064                        break;
2065                }
2066
2067                _iov = iov + ret;
2068                size = map->size - addr + map->start;
2069                _iov->iov_len = min((u64)len - s, size);
2070                _iov->iov_base = (void __user *)(unsigned long)
2071                                 (map->addr + addr - map->start);
2072                s += size;
2073                addr += size;
2074                ++ret;
2075        }
2076
2077        if (ret == -EAGAIN)
2078                vhost_iotlb_miss(vq, addr, access);
2079        return ret;
2080}
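
/* Worked example (editorial, not in the original source): suppose the iotlb
 * holds two adjacent maps, A: [0x1000, 0x1fff] -> HVA X and
 * B: [0x2000, 0x2fff] -> HVA Y. Translating addr = 0x1800, len = 0x1000
 * produces two iovecs:
 *
 *	iov[0] = { .iov_base = X + 0x800, .iov_len = 0x800 }	(tail of A)
 *	iov[1] = { .iov_base = Y,         .iov_len = 0x800 }	(head of B)
 *
 * A range with no covering map fails with -EFAULT, or with -EAGAIN (after
 * queueing an IOTLB miss) when a device IOTLB is in use.
 */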
2081
2082/* Each buffer in the virtqueues is actually a chain of descriptors.  This
2083 * function returns the next descriptor in the chain,
2084 * or -1U if we're at the end. */
2085static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2086{
2087        unsigned int next;
2088
2089        /* If this descriptor says it doesn't chain, we're done. */
2090        if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2091                return -1U;
2092
2093        /* Check they're not leading us off the end of the descriptors. */
2094        next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2095        return next;
2096}
2097
2098static int get_indirect(struct vhost_virtqueue *vq,
2099                        struct iovec iov[], unsigned int iov_size,
2100                        unsigned int *out_num, unsigned int *in_num,
2101                        struct vhost_log *log, unsigned int *log_num,
2102                        struct vring_desc *indirect)
2103{
2104        struct vring_desc desc;
2105        unsigned int i = 0, count, found = 0;
2106        u32 len = vhost32_to_cpu(vq, indirect->len);
2107        struct iov_iter from;
2108        int ret, access;
2109
2110        /* Sanity check */
2111        if (unlikely(len % sizeof desc)) {
2112                vq_err(vq, "Invalid length in indirect descriptor: "
2113                       "len 0x%llx not multiple of 0x%zx\n",
2114                       (unsigned long long)len,
2115                       sizeof desc);
2116                return -EINVAL;
2117        }
2118
2119        ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2120                             UIO_MAXIOV, VHOST_ACCESS_RO);
2121        if (unlikely(ret < 0)) {
2122                if (ret != -EAGAIN)
2123                        vq_err(vq, "Translation failure %d in indirect.\n", ret);
2124                return ret;
2125        }
2126        iov_iter_init(&from, READ, vq->indirect, ret, len);
2127        count = len / sizeof desc;
2128        /* Buffers are chained via a 16 bit next field, so
2129         * we can have at most 2^16 of these. */
2130        if (unlikely(count > USHRT_MAX + 1)) {
2131                vq_err(vq, "Indirect buffer length too big: %d\n",
2132                       indirect->len);
2133                return -E2BIG;
2134        }
2135
2136        do {
2137                unsigned iov_count = *in_num + *out_num;
2138                if (unlikely(++found > count)) {
2139                        vq_err(vq, "Loop detected: last one at %u "
2140                               "indirect size %u\n",
2141                               i, count);
2142                        return -EINVAL;
2143                }
2144                if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2145                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2146                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2147                        return -EINVAL;
2148                }
2149                if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2150                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2151                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2152                        return -EINVAL;
2153                }
2154
2155                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2156                        access = VHOST_ACCESS_WO;
2157                else
2158                        access = VHOST_ACCESS_RO;
2159
2160                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2161                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2162                                     iov_size - iov_count, access);
2163                if (unlikely(ret < 0)) {
2164                        if (ret != -EAGAIN)
2165                                vq_err(vq, "Translation failure %d indirect idx %d\n",
2166                                        ret, i);
2167                        return ret;
2168                }
2169                /* If this is an input descriptor, increment that count. */
2170                if (access == VHOST_ACCESS_WO) {
2171                        *in_num += ret;
2172                        if (unlikely(log && ret)) {
2173                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2174                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2175                                ++*log_num;
2176                        }
2177                } else {
2178                        /* If it's an output descriptor, they're all supposed
2179                         * to come before any input descriptors. */
2180                        if (unlikely(*in_num)) {
2181                                vq_err(vq, "Indirect descriptor "
2182                                       "has out after in: idx %d\n", i);
2183                                return -EINVAL;
2184                        }
2185                        *out_num += ret;
2186                }
2187        } while ((i = next_desc(vq, &desc)) != -1);
2188        return 0;
2189}
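
/* Editorial illustration, not in the original source: a typical indirect
 * chain as get_indirect() walks it. One ring descriptor with
 * VRING_DESC_F_INDIRECT points at a guest buffer holding a small descriptor
 * table, e.g. for a block-style request:
 *
 *	table[0]: out (request header), flags = NEXT,  next = 1
 *	table[1]: out (data to write),  flags = NEXT,  next = 2
 *	table[2]: in  (status byte),    flags = WRITE
 *
 * All out descriptors must precede the in descriptors, nesting another
 * indirect table is rejected, and at most len / sizeof(struct vring_desc)
 * entries may be visited before the loop check above fires.
 */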
2190
2191/* This looks in the virtqueue for the first available buffer, and converts
2192 * it to an iovec for convenient access.  Since descriptors consist of some
2193 * number of output then some number of input descriptors, it's actually two
2194 * iovecs, but we pack them into one and note how many of each there were.
2195 *
2196 * This function returns the descriptor number found, or vq->num (which is
2197 * never a valid descriptor number) if none was found.  A negative code is
2198 * returned on error. */
2199int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2200                      struct iovec iov[], unsigned int iov_size,
2201                      unsigned int *out_num, unsigned int *in_num,
2202                      struct vhost_log *log, unsigned int *log_num)
2203{
2204        struct vring_desc desc;
2205        unsigned int i, head, found = 0;
2206        u16 last_avail_idx;
2207        __virtio16 avail_idx;
2208        __virtio16 ring_head;
2209        int ret, access;
2210
2211        /* Check it isn't doing very strange things with descriptor numbers. */
2212        last_avail_idx = vq->last_avail_idx;
2213
2214        if (vq->avail_idx == vq->last_avail_idx) {
2215                if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2216                        vq_err(vq, "Failed to access avail idx at %p\n",
2217                                &vq->avail->idx);
2218                        return -EFAULT;
2219                }
2220                vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2221
2222                if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2223                        vq_err(vq, "Guest moved used index from %u to %u",
2224                                last_avail_idx, vq->avail_idx);
2225                        return -EFAULT;
2226                }
2227
2228                /* If there's nothing new since last we looked, return
2229                 * invalid.
2230                 */
2231                if (vq->avail_idx == last_avail_idx)
2232                        return vq->num;
2233
2234                /* Only get avail ring entries after they have been
2235                 * exposed by guest.
2236                 */
2237                smp_rmb();
2238        }
2239
2240        /* Grab the next descriptor number they're advertising, and increment
2241         * the index we've seen. */
2242        if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2243                vq_err(vq, "Failed to read head: idx %d address %p\n",
2244                       last_avail_idx,
2245                       &vq->avail->ring[last_avail_idx % vq->num]);
2246                return -EFAULT;
2247        }
2248
2249        head = vhost16_to_cpu(vq, ring_head);
2250
2251        /* If their number is silly, that's an error. */
2252        if (unlikely(head >= vq->num)) {
2253                vq_err(vq, "Guest says index %u > %u is available",
2254                       head, vq->num);
2255                return -EINVAL;
2256        }
2257
2258        /* When we start there are neither input nor output descriptors. */
2259        *out_num = *in_num = 0;
2260        if (unlikely(log))
2261                *log_num = 0;
2262
2263        i = head;
2264        do {
2265                unsigned iov_count = *in_num + *out_num;
2266                if (unlikely(i >= vq->num)) {
2267                        vq_err(vq, "Desc index is %u > %u, head = %u",
2268                               i, vq->num, head);
2269                        return -EINVAL;
2270                }
2271                if (unlikely(++found > vq->num)) {
2272                        vq_err(vq, "Loop detected: last one at %u "
2273                               "vq size %u head %u\n",
2274                               i, vq->num, head);
2275                        return -EINVAL;
2276                }
2277                ret = vhost_get_desc(vq, &desc, i);
2278                if (unlikely(ret)) {
2279                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2280                               i, vq->desc + i);
2281                        return -EFAULT;
2282                }
2283                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2284                        ret = get_indirect(vq, iov, iov_size,
2285                                           out_num, in_num,
2286                                           log, log_num, &desc);
2287                        if (unlikely(ret < 0)) {
2288                                if (ret != -EAGAIN)
2289                                        vq_err(vq, "Failure detected "
2290                                                "in indirect descriptor at idx %d\n", i);
2291                                return ret;
2292                        }
2293                        continue;
2294                }
2295
2296                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2297                        access = VHOST_ACCESS_WO;
2298                else
2299                        access = VHOST_ACCESS_RO;
2300                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2301                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2302                                     iov_size - iov_count, access);
2303                if (unlikely(ret < 0)) {
2304                        if (ret != -EAGAIN)
2305                                vq_err(vq, "Translation failure %d descriptor idx %d\n",
2306                                        ret, i);
2307                        return ret;
2308                }
2309                if (access == VHOST_ACCESS_WO) {
2310                        /* If this is an input descriptor,
2311                         * increment that count. */
2312                        *in_num += ret;
2313                        if (unlikely(log && ret)) {
2314                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2315                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2316                                ++*log_num;
2317                        }
2318                } else {
2319                        /* If it's an output descriptor, they're all supposed
2320                         * to come before any input descriptors. */
2321                        if (unlikely(*in_num)) {
2322                                vq_err(vq, "Descriptor has out after in: "
2323                                       "idx %d\n", i);
2324                                return -EINVAL;
2325                        }
2326                        *out_num += ret;
2327                }
2328        } while ((i = next_desc(vq, &desc)) != -1);
2329
2330        /* On success, increment avail index. */
2331        vq->last_avail_idx++;
2332
2333        /* Assume notifications from the guest are disabled at this point;
2334         * if they aren't, we would need to update the avail_event index. */
2335        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2336        return head;
2337}
2338EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
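
/* Editorial sketch, not part of the original source: the canonical consume
 * loop built on vhost_get_vq_desc(), modeled on existing backends such as
 * drivers/vhost/test.c. The process() step and the len it returns are
 * placeholders.
 *
 *	vhost_disable_notify(&dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;				// error
 *		if (head == vq->num) {			// ring drained
 *			if (unlikely(vhost_enable_notify(&dev, vq))) {
 *				// raced with a new buffer: keep going
 *				vhost_disable_notify(&dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = process(vq->iov, out, in);	// hypothetical
 *		vhost_add_used_and_signal(&dev, vq, head, len);
 *	}
 */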
2339
2340/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2341void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2342{
2343        vq->last_avail_idx -= n;
2344}
2345EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2346
2347/* After we've used one of their buffers, we tell them about it.  We'll then
2348 * want to notify the guest, using eventfd. */
2349int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2350{
2351        struct vring_used_elem heads = {
2352                cpu_to_vhost32(vq, head),
2353                cpu_to_vhost32(vq, len)
2354        };
2355
2356        return vhost_add_used_n(vq, &heads, 1);
2357}
2358EXPORT_SYMBOL_GPL(vhost_add_used);
2359
2360static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2361                            struct vring_used_elem *heads,
2362                            unsigned count)
2363{
2364        vring_used_elem_t __user *used;
2365        u16 old, new;
2366        int start;
2367
2368        start = vq->last_used_idx & (vq->num - 1);
2369        used = vq->used->ring + start;
2370        if (vhost_put_used(vq, heads, start, count)) {
2371                vq_err(vq, "Failed to write used");
2372                return -EFAULT;
2373        }
2374        if (unlikely(vq->log_used)) {
2375                /* Make sure data is seen before log. */
2376                smp_wmb();
2377                /* Log used ring entry write. */
2378                log_used(vq, ((void __user *)used - (void __user *)vq->used),
2379                         count * sizeof *used);
2380        }
2381        old = vq->last_used_idx;
2382        new = (vq->last_used_idx += count);
2383        /* If the driver goes a very long while without signalling, the
2384         * used index might wrap around. If that happens, invalidate the
2385         * signalled_used index we stored. TODO: make sure the driver
2386         * signals at least once per 2^16 entries and remove this. */
2387        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2388                vq->signalled_used_valid = false;
2389        return 0;
2390}
2391
2392/* After we've used one of their buffers, we tell them about it.  We'll then
2393 * want to notify the guest, using eventfd. */
2394int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2395                     unsigned count)
2396{
2397        int start, n, r;
2398
2399        start = vq->last_used_idx & (vq->num - 1);
2400        n = vq->num - start;
2401        if (n < count) {
2402                r = __vhost_add_used_n(vq, heads, n);
2403                if (r < 0)
2404                        return r;
2405                heads += n;
2406                count -= n;
2407        }
2408        r = __vhost_add_used_n(vq, heads, count);
2409
2410        /* Make sure buffer is written before we update index. */
2411        smp_wmb();
2412        if (vhost_put_used_idx(vq)) {
2413                vq_err(vq, "Failed to increment used idx");
2414                return -EFAULT;
2415        }
2416        if (unlikely(vq->log_used)) {
2417                /* Make sure used idx is seen before log. */
2418                smp_wmb();
2419                /* Log used index update. */
2420                log_used(vq, offsetof(struct vring_used, idx),
2421                         sizeof vq->used->idx);
2422                if (vq->log_ctx)
2423                        eventfd_signal(vq->log_ctx, 1);
2424        }
2425        return r;
2426}
2427EXPORT_SYMBOL_GPL(vhost_add_used_n);
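
/* Worked example (editorial, not in the original source): with vq->num = 256,
 * last_used_idx = 250 and count = 10, the first __vhost_add_used_n() call
 * writes entries at slots 250..255 (n = 6) and the second writes the
 * remaining 4 at slots 0..3 after the ring wraps. Only then is used->idx
 * pushed forward, so the guest never observes partially written entries.
 */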
2428
2429static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2430{
2431        __u16 old, new;
2432        __virtio16 event;
2433        bool v;
2434        /* Flush out used index updates. This is paired
2435         * with the barrier that the Guest executes when enabling
2436         * interrupts. */
2437        smp_mb();
2438
2439        if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2440            unlikely(vq->avail_idx == vq->last_avail_idx))
2441                return true;
2442
2443        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2444                __virtio16 flags;
2445                if (vhost_get_avail_flags(vq, &flags)) {
2446                        vq_err(vq, "Failed to get flags");
2447                        return true;
2448                }
2449                return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2450        }
2451        old = vq->signalled_used;
2452        v = vq->signalled_used_valid;
2453        new = vq->signalled_used = vq->last_used_idx;
2454        vq->signalled_used_valid = true;
2455
2456        if (unlikely(!v))
2457                return true;
2458
2459        if (vhost_get_used_event(vq, &event)) {
2460                vq_err(vq, "Failed to get used event idx");
2461                return true;
2462        }
2463        return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2464}
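
/* Worked example (editorial, not in the original source): with event index
 * enabled the decision above reduces to vring_need_event(event, new, old)
 * from include/uapi/linux/virtio_ring.h, i.e.
 *
 *	(u16)(new - event - 1) < (u16)(new - old)
 *
 * which is true exactly when the guest's used_event fell inside the window
 * (old, new]. E.g. with old = 10, new = 15: event = 12 signals (2 < 5),
 * while event = 20 does not (65530 < 5 is false after 16-bit wraparound).
 */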
2465
2466/* This actually signals the guest, using eventfd. */
2467void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2468{
2469        /* Signal the Guest to tell them we used something up. */
2470        if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2471                eventfd_signal(vq->call_ctx.ctx, 1);
2472}
2473EXPORT_SYMBOL_GPL(vhost_signal);
2474
2475/* And here's the combo meal deal.  Supersize me! */
2476void vhost_add_used_and_signal(struct vhost_dev *dev,
2477                               struct vhost_virtqueue *vq,
2478                               unsigned int head, int len)
2479{
2480        vhost_add_used(vq, head, len);
2481        vhost_signal(dev, vq);
2482}
2483EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2484
2485/* multi-buffer version of vhost_add_used_and_signal */
2486void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2487                                 struct vhost_virtqueue *vq,
2488                                 struct vring_used_elem *heads, unsigned count)
2489{
2490        vhost_add_used_n(vq, heads, count);
2491        vhost_signal(dev, vq);
2492}
2493EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2494
2495/* Return true if we're sure that the available ring is empty. */
2496bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2497{
2498        __virtio16 avail_idx;
2499        int r;
2500
2501        if (vq->avail_idx != vq->last_avail_idx)
2502                return false;
2503
2504        r = vhost_get_avail_idx(vq, &avail_idx);
2505        if (unlikely(r))
2506                return false;
2507        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2508
2509        return vq->avail_idx == vq->last_avail_idx;
2510}
2511EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2512
2513/* OK, now we need to know about added descriptors. */
2514bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2515{
2516        __virtio16 avail_idx;
2517        int r;
2518
2519        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2520                return false;
2521        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2522        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2523                r = vhost_update_used_flags(vq);
2524                if (r) {
2525                        vq_err(vq, "Failed to enable notification at %p: %d\n",
2526                               &vq->used->flags, r);
2527                        return false;
2528                }
2529        } else {
2530                r = vhost_update_avail_event(vq, vq->avail_idx);
2531                if (r) {
2532                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
2533                               vhost_avail_event(vq), r);
2534                        return false;
2535                }
2536        }
2537        /* They could have slipped one in as we were doing that: make
2538         * sure it's written, then check again. */
2539        smp_mb();
2540        r = vhost_get_avail_idx(vq, &avail_idx);
2541        if (r) {
2542                vq_err(vq, "Failed to check avail idx at %p: %d\n",
2543                       &vq->avail->idx, r);
2544                return false;
2545        }
2546
2547        return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2548}
2549EXPORT_SYMBOL_GPL(vhost_enable_notify);
2550
2551/* We don't need to be notified again. */
2552void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2553{
2554        int r;
2555
2556        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2557                return;
2558        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2559        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2560                r = vhost_update_used_flags(vq);
2561                if (r)
2562                        vq_err(vq, "Failed to disable notification at %p: %d\n",
2563                               &vq->used->flags, r);
2564        }
2565}
2566EXPORT_SYMBOL_GPL(vhost_disable_notify);
2567
2568/* Create a new message. */
2569struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2570{
2571        struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2572        if (!node)
2573                return NULL;
2574
2575        /* Make sure all padding within the structure is initialized. */
2576        memset(&node->msg, 0, sizeof node->msg);
2577        node->vq = vq;
2578        node->msg.type = type;
2579        return node;
2580}
2581EXPORT_SYMBOL_GPL(vhost_new_msg);
2582
2583void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2584                       struct vhost_msg_node *node)
2585{
2586        spin_lock(&dev->iotlb_lock);
2587        list_add_tail(&node->node, head);
2588        spin_unlock(&dev->iotlb_lock);
2589
2590        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2591}
2592EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2593
2594struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2595                                         struct list_head *head)
2596{
2597        struct vhost_msg_node *node = NULL;
2598
2599        spin_lock(&dev->iotlb_lock);
2600        if (!list_empty(head)) {
2601                node = list_first_entry(head, struct vhost_msg_node,
2602                                        node);
2603                list_del(&node->node);
2604        }
2605        spin_unlock(&dev->iotlb_lock);
2606
2607        return node;
2608}
2609EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
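
/* Editorial sketch, not part of the original source: how these queues carry
 * the device-IOTLB protocol. A translation miss enqueues a VHOST_IOTLB_MISS
 * message; userspace reads it from the vhost device fd and answers with a
 * VHOST_IOTLB_UPDATE, roughly (assuming the v2 message format has been
 * negotiated):
 *
 *	struct vhost_msg_v2 msg;
 *
 *	read(vhost_fd, &msg, sizeof(msg));		// VHOST_IOTLB_MISS
 *	msg.iotlb.type = VHOST_IOTLB_UPDATE;
 *	msg.iotlb.uaddr = hva_for(msg.iotlb.iova);	// hypothetical lookup
 *	msg.iotlb.size = region_size;			// assumed known
 *	write(vhost_fd, &msg, sizeof(msg));
 *
 * Field names are from the vhost UAPI (struct vhost_msg_v2); hva_for() and
 * region_size are placeholders for the backend's own memory-map lookup.
 */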
2610
2611void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2612{
2613        struct vhost_virtqueue *vq;
2614        int i;
2615
2616        mutex_lock(&dev->mutex);
2617        for (i = 0; i < dev->nvqs; ++i) {
2618                vq = dev->vqs[i];
2619                mutex_lock(&vq->mutex);
2620                vq->acked_backend_features = features;
2621                mutex_unlock(&vq->mutex);
2622        }
2623        mutex_unlock(&dev->mutex);
2624}
2625EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2626
2627static int __init vhost_init(void)
2628{
2629        return 0;
2630}
2631
2632static void __exit vhost_exit(void)
2633{
2634}
2635
2636module_init(vhost_init);
2637module_exit(vhost_exit);
2638
2639MODULE_VERSION("0.0.1");
2640MODULE_LICENSE("GPL v2");
2641MODULE_AUTHOR("Michael S. Tsirkin");
2642MODULE_DESCRIPTION("Host kernel accelerator for virtio");
2643