/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10
struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

/*
 * One subscriber list per event type.  Updates are serialized by
 * subscriber_mutex; event delivery walks the lists under RCU.
 */
static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry.  Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;

	rcu_read_lock();
	subscriber_list = &subscriber_array[event_msg->event_data.event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - subscribe to a given event.
 * @event:		The event to subscribe to.
 * @callback:		The callback to invoke upon the event.
 * @callback_data:	Data to pass to the callback.
 * @new_subscription_id: ID used to track the subscription.  Used with
 *			vmci_event_unsubscribe().
 *
 * Subscribes to the provided event.  The callback specified will be
 * fired from an RCU critical section and therefore must not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new event is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	if (retval != VMCI_SUCCESS) {
		/* No free id was found; don't leak the subscription. */
		kfree(sub);
		return retval;
	}

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);

/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id:	A subscription ID as provided by vmci_event_subscribe().
 *
 * Removes the subscription from its list and frees it once any
 * in-flight deliveries have completed (via synchronize_rcu()).
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	/* Wait for any in-flight event_deliver() callbacks to finish. */
	synchronize_rcu();
	kfree(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
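
/*
 * Usage sketch (illustrative only, not part of this driver): how a VMCI
 * client module might use vmci_event_subscribe()/vmci_event_unsubscribe().
 * The event type VMCI_EVENT_QP_RESUMED and the example_* names are
 * assumptions made for the sketch.  The callback is invoked from
 * event_deliver() under rcu_read_lock(), so it must not sleep.
 *
 *	static u32 example_sub_id;
 *
 *	static void example_event_cb(u32 sub_id,
 *				     const struct vmci_event_data *ed,
 *				     void *client_data)
 *	{
 *		pr_info("VMCI event %u on subscription %u\n",
 *			ed->event, sub_id);
 *	}
 *
 *	static int example_start(void)
 *	{
 *		return vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
 *					    example_event_cb, NULL,
 *					    &example_sub_id);
 *	}
 *
 *	static void example_stop(void)
 *	{
 *		vmci_event_unsubscribe(example_sub_id);
 *	}
 */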