linux/drivers/misc/vmw_vmci/vmci_event.c
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
        u32 id;
        u32 event;
        vmci_event_cb callback;
        void *callback_data;
        struct list_head node;  /* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
        int i;

        for (i = 0; i < VMCI_EVENT_MAX; i++)
                INIT_LIST_HEAD(&subscriber_array[i]);

        return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
        int e;

        /* We free all memory at exit. */
        for (e = 0; e < VMCI_EVENT_MAX; e++) {
                struct vmci_subscription *cur, *p2;
                list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

                        /*
                         * We should never get here because all events
                         * should have been unregistered before we try
                         * to unload the driver module.
                         */
                        pr_warn("Unexpected free events occurring\n");
                        list_del(&cur->node);
                        kfree(cur);
                }
        }
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
        int e;

        for (e = 0; e < VMCI_EVENT_MAX; e++) {
                struct vmci_subscription *cur;
                list_for_each_entry(cur, &subscriber_array[e], node) {
                        if (cur->id == sub_id)
                                return cur;
                }
        }
        return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 * Runs under rcu_read_lock(); writers modify the subscriber lists
 * under subscriber_mutex using list_add_rcu()/list_del_rcu(), so
 * delivery never has to take the mutex.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
        struct vmci_subscription *cur;
        struct list_head *subscriber_list;

        rcu_read_lock();
        subscriber_list = &subscriber_array[event_msg->event_data.event];
        list_for_each_entry_rcu(cur, subscriber_list, node) {
                cur->callback(cur->id, &event_msg->event_data,
                              cur->callback_data);
        }
        rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
        struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

        /*
         * The payload must hold at least the u32 event field and at
         * most the largest defined event payload.
         */
        if (msg->payload_size < sizeof(u32) ||
            msg->payload_size > sizeof(struct vmci_event_data_max))
                return VMCI_ERROR_INVALID_ARGS;

        if (!VMCI_EVENT_VALID(event_msg->event_data.event))
                return VMCI_ERROR_EVENT_UNKNOWN;

        event_deliver(event_msg);
        return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track the subscription.  Used with
 *              vmci_event_unsubscribe().
 *
 * Subscribes to the provided event. The callback specified will be
 * invoked from within an RCU read-side critical section and therefore
 * must not sleep.
 */
int vmci_event_subscribe(u32 event,
                         vmci_event_cb callback,
                         void *callback_data,
                         u32 *new_subscription_id)
{
        struct vmci_subscription *sub;
        int attempts;
        int retval;
        bool have_new_id = false;

        if (!new_subscription_id) {
                pr_devel("%s: Invalid subscription (NULL)\n", __func__);
                return VMCI_ERROR_INVALID_ARGS;
        }

        if (!VMCI_EVENT_VALID(event) || !callback) {
                pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
                         __func__, event, callback, callback_data);
                return VMCI_ERROR_INVALID_ARGS;
        }

        sub = kzalloc(sizeof(*sub), GFP_KERNEL);
        if (!sub)
                return VMCI_ERROR_NO_MEM;

        sub->id = VMCI_EVENT_MAX;
        sub->event = event;
        sub->callback = callback;
        sub->callback_data = callback_data;
        INIT_LIST_HEAD(&sub->node);

        mutex_lock(&subscriber_mutex);

        /* Creation of a new event is always allowed. */
        for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
                static u32 subscription_id;
                /*
                 * We try to get an id a couple of times before
                 * claiming we are out of resources.
                 */

                /* Test for duplicate id. */
                if (!event_find(++subscription_id)) {
                        sub->id = subscription_id;
                        have_new_id = true;
                        break;
                }
        }

        if (have_new_id) {
                list_add_rcu(&sub->node, &subscriber_array[event]);
                retval = VMCI_SUCCESS;
        } else {
                retval = VMCI_ERROR_NO_RESOURCES;
        }

        mutex_unlock(&subscriber_mutex);

        *new_subscription_id = sub->id;
        return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
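
/*
 * Illustrative sketch only (not part of the driver): a minimal client of
 * vmci_event_subscribe().  The callback signature follows the vmci_event_cb
 * call made in event_deliver() above; the event constant and function names
 * below are examples, not code from this file.
 */
#if 0
static void example_event_cb(u32 sub_id, const struct vmci_event_data *ed,
                             void *client_data)
{
        /* Called from event_deliver() under rcu_read_lock(); must not sleep. */
        pr_info("VMCI event %u delivered to subscription %u\n",
                ed->event, sub_id);
}

static int example_subscribe(u32 *sub_id)
{
        /* VMCI_EVENT_QP_RESUMED is one of the VMCI_EVENT_* values. */
        return vmci_event_subscribe(VMCI_EVENT_QP_RESUMED, example_event_cb,
                                    NULL, sub_id);
}
#endif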

/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribes from the given event. Removes the subscription from the
 * subscriber list and frees it after an RCU grace period, so the callback
 * will no longer be invoked once this function returns.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
        struct vmci_subscription *s;

        mutex_lock(&subscriber_mutex);
        s = event_find(sub_id);
        if (s)
                list_del_rcu(&s->node);
        mutex_unlock(&subscriber_mutex);

        if (!s)
                return VMCI_ERROR_NOT_FOUND;

        /*
         * Wait for any event_deliver() readers still traversing the list
         * to finish before the subscription is freed.
         */
        synchronize_rcu();
        kfree(s);

        return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
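
/*
 * Illustrative sketch only: tearing the example subscription back down.
 * vmci_event_unsubscribe() blocks in synchronize_rcu(), so it must be
 * called from a context that may sleep; the callback will not run again
 * after it returns.  The function name is an example, not code from this
 * file.
 */
#if 0
static void example_unsubscribe(u32 sub_id)
{
        if (vmci_event_unsubscribe(sub_id) != VMCI_SUCCESS)
                pr_warn("Subscription %u was not registered\n", sub_id);
}
#endif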