linux/drivers/virt/acrn/irqfd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *      Shuo Liu <shuo.a.liu@intel.com>
 *      Yakui Zhao <yakui.zhao@intel.com>
 */

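/*
 * Rough user-space usage sketch (illustration only, not part of the driver):
 * a VMM that has already created a VM on its /dev/acrn_hsm file descriptor
 * could bind an eventfd to a guest MSI roughly as follows. The struct
 * acrn_irqfd fields and ACRN_IRQFD_FLAG_DEASSIGN match what this file uses;
 * the ioctl request name is assumed to be ACRN_IOCTL_IRQFD from the ACRN
 * UAPI header, and the device path, descriptor names and MSI address/data
 * values are placeholders.
 *
 *      int efd = eventfd(0, EFD_CLOEXEC);
 *      struct acrn_irqfd irqfd = {
 *              .fd    = efd,
 *              .flags = 0,
 *              .msi   = { .msi_addr = 0xfee00000, .msi_data = 0x4021 },
 *      };
 *
 *      ioctl(hsm_fd, ACRN_IOCTL_IRQFD, &irqfd);        // assign
 *      write(efd, &(uint64_t){ 1 }, 8);                // signals the eventfd,
 *                                                      // which injects the MSI
 *
 *      irqfd.flags = ACRN_IRQFD_FLAG_DEASSIGN;
 *      ioctl(hsm_fd, ACRN_IOCTL_IRQFD, &irqfd);        // deassign
 */
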
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include "acrn_drv.h"

static LIST_HEAD(acrn_irqfd_clients);
static DEFINE_MUTEX(acrn_irqfds_mutex);

/**
 * struct hsm_irqfd - Properties of HSM irqfd
 * @vm:         Associated VM pointer
 * @wait:       Entry of wait-queue
 * @shutdown:   Async shutdown work
 * @eventfd:    Associated eventfd
 * @list:       Entry within &acrn_vm.irqfds of irqfds of a VM
 * @pt:         Structure for select/poll on the associated eventfd
 * @msi:        MSI data
 */
struct hsm_irqfd {
        struct acrn_vm          *vm;
        wait_queue_entry_t      wait;
        struct work_struct      shutdown;
        struct eventfd_ctx      *eventfd;
        struct list_head        list;
        poll_table              pt;
        struct acrn_msi_entry   msi;
};

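/* Inject the MSI described by @irqfd into its VM through the hypervisor. */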
static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
{
        struct acrn_vm *vm = irqfd->vm;

        acrn_msi_inject(vm, irqfd->msi.msi_addr,
                        irqfd->msi.msi_data);
}

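/*
 * Tear down one irqfd: unlink it from the VM's list, detach its wait entry
 * from the eventfd's wait queue and drop the references it holds. The
 * caller must hold the VM's irqfds_lock.
 */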
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
        u64 cnt;

        lockdep_assert_held(&irqfd->vm->irqfds_lock);

        /* remove from wait queue */
        list_del_init(&irqfd->list);
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}

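/*
 * Deferred shutdown, run from the VM's irqfd workqueue so that irqfds_lock
 * (a mutex) can be taken, which the wake-up callback that queued this work
 * cannot do. The irqfd is only torn down if it is still on the VM's list.
 */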
static void hsm_irqfd_shutdown_work(struct work_struct *work)
{
        struct hsm_irqfd *irqfd;
        struct acrn_vm *vm;

        irqfd = container_of(work, struct hsm_irqfd, shutdown);
        vm = irqfd->vm;
        mutex_lock(&vm->irqfds_lock);
        if (!list_empty(&irqfd->list))
                hsm_irqfd_shutdown(irqfd);
        mutex_unlock(&vm->irqfds_lock);
}

/* Called with wqh->lock held and interrupts disabled */
static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
                            int sync, void *key)
{
        unsigned long poll_bits = (unsigned long)key;
        struct hsm_irqfd *irqfd;
        struct acrn_vm *vm;

        irqfd = container_of(wait, struct hsm_irqfd, wait);
        vm = irqfd->vm;
        if (poll_bits & POLLIN)
                /* An event has been signaled, inject an interrupt */
                acrn_irqfd_inject(irqfd);

        if (poll_bits & POLLHUP)
                /*
                 * The eventfd is being released and wqh->lock is held
                 * here, so defer the shutdown to the VM's workqueue.
                 */
                queue_work(vm->irqfd_wq, &irqfd->shutdown);

        return 0;
}

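/*
 * poll_table callback invoked through vfs_poll() during assignment: it
 * registers the irqfd's wait entry on the eventfd's wait queue head so
 * that hsm_irqfd_wakeup() runs whenever the eventfd is signaled.
 */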
static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
                                poll_table *pt)
{
        struct hsm_irqfd *irqfd;

        irqfd = container_of(pt, struct hsm_irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}

/*
 * Assign an eventfd to a VM and create a HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
        struct eventfd_ctx *eventfd = NULL;
        struct hsm_irqfd *irqfd, *tmp;
        __poll_t events;
        struct fd f;
        int ret = 0;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->vm = vm;
        memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        /*
         * Install custom wake-up handling to be notified whenever underlying
         * eventfd is signaled.
         */
        init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);

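        /* Reject duplicates: an eventfd may back at most one irqfd per VM */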
        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry(tmp, &vm->irqfds, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                ret = -EBUSY;
                mutex_unlock(&vm->irqfds_lock);
                goto fail;
        }
        list_add_tail(&irqfd->list, &vm->irqfds);
        mutex_unlock(&vm->irqfds_lock);

        /* Check whether an event was already pending before registration */
        events = vfs_poll(f.file, &irqfd->pt);

        if (events & EPOLLIN)
                acrn_irqfd_inject(irqfd);

        fdput(f);
        return 0;
fail:
        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);
out:
        kfree(irqfd);
        return ret;
}

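/*
 * Undo an earlier assignment: look up the irqfd bound to the eventfd
 * referenced by @args->fd and, if one is found, shut it down.
 */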
static int acrn_irqfd_deassign(struct acrn_vm *vm,
                               struct acrn_irqfd *args)
{
        struct hsm_irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
                if (irqfd->eventfd == eventfd) {
                        hsm_irqfd_shutdown(irqfd);
                        break;
                }
        }
        mutex_unlock(&vm->irqfds_lock);
        eventfd_ctx_put(eventfd);

        return 0;
}

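/*
 * Dispatch an irqfd configuration request (typically issued through the
 * HSM ioctl interface) to assignment or deassignment, depending on
 * ACRN_IRQFD_FLAG_DEASSIGN.
 */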
int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
{
        int ret;

        if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
                ret = acrn_irqfd_deassign(vm, args);
        else
                ret = acrn_irqfd_assign(vm, args);

        return ret;
}

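/* Set up per-VM irqfd state: the list, its lock and the shutdown workqueue */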
int acrn_irqfd_init(struct acrn_vm *vm)
{
        INIT_LIST_HEAD(&vm->irqfds);
        mutex_init(&vm->irqfds_lock);
        vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
        if (!vm->irqfd_wq)
                return -ENOMEM;

        dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
        return 0;
}

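/*
 * Tear down per-VM irqfd state: flush and destroy the shutdown workqueue
 * first, then release any irqfds still bound to the VM.
 */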
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
        struct hsm_irqfd *irqfd, *next;

        dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
        destroy_workqueue(vm->irqfd_wq);
        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
                hsm_irqfd_shutdown(irqfd);
        mutex_unlock(&vm->irqfds_lock);
}