/*
 * cn_queue.c - connector callback queue handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>
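
/*
 * Deferred creation of the per-device singlethreaded workqueue; scheduled
 * from queue_cn_work() the first time work must be queued.
 */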
static void cn_queue_create(struct work_struct *work)
{
        struct cn_queue_dev *dev;

        dev = container_of(work, struct cn_queue_dev, wq_creation);

        dev->cn_queue = create_singlethread_workqueue(dev->name);
        /* If creation fails, queue_cn_work() keeps falling back to keventd. */
        WARN_ON(!dev->cn_queue);
}
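
/*
 * Queue work on the device's private workqueue when it is available;
 * otherwise run on the shared kernel workqueue (keventd) and, on the
 * first such fallback, schedule creation of the private workqueue.
 */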
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
        struct cn_queue_dev *pdev = cbq->pdev;

        if (likely(pdev->cn_queue))
                return queue_work(pdev->cn_queue, work);

        /* Only the first requester schedules the creation work. */
        if (atomic_inc_return(&pdev->wq_requested) == 1)
                schedule_work(&pdev->wq_creation);
        else
                atomic_dec(&pdev->wq_requested);

        return schedule_work(work);
}
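
/*
 * Work handler wrapping every connector callback: extract the netlink
 * message from the queued skb, invoke the callback, then release the skb.
 */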
void cn_queue_wrapper(struct work_struct *work)
{
        struct cn_callback_entry *cbq =
                container_of(work, struct cn_callback_entry, work);
        struct cn_callback_data *d = &cbq->data;
        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
        struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);

        d->callback(msg, nsp);

        kfree_skb(d->skb);
        d->skb = NULL;

        /* Free the auxiliary buffer attached to this entry, if any. */
        kfree(d->free);
}
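
/*
 * Allocate a callback entry and initialize it with the given name, id
 * and callback; its work item runs through cn_queue_wrapper().
 */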
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
                              void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                printk(KERN_ERR "Failed to create new callback queue.\n");
                return NULL;
        }

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;

        INIT_WORK(&cbq->work, &cn_queue_wrapper);
        return cbq;
}
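
/*
 * Free a callback entry, flushing both possible workqueues first so no
 * queued work can still reference it.
 */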
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
        flush_scheduled_work();
        if (cbq->pdev->cn_queue)
                flush_workqueue(cbq->pdev->cn_queue);

        kfree(cbq);
}
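
/* Two callback ids match when both their idx and val parts are equal. */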
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
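
/*
 * Register a new callback on @dev.  Registration fails with -EINVAL if a
 * callback with the same id already exists.
 */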
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
                          void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(name, id, callback);
        if (!cbq)
                return -ENOMEM;

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
                return -EINVAL;
        }

        cbq->seq = 0;
        cbq->group = cbq->id.id.idx;

        return 0;
}
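
/*
 * Unregister the callback matching @id: unlink it under the queue lock,
 * then free it (and drop the device reference) outside the lock.
 */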
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
        }
}
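
/*
 * Allocate and initialize a queue device for the given netlink socket.
 * The private workqueue is not created here; see queue_cn_work().
 */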
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
        init_waitqueue_head(&dev->wq_created);

        dev->nls = nls;

        INIT_WORK(&dev->wq_creation, cn_queue_create);

        return dev;
}
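
/*
 * Tear down a queue device: wait for any in-flight workqueue creation,
 * destroy the private workqueue, drop all remaining callbacks and wait
 * for the reference count to reach zero before freeing.
 */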
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;
        long timeout;
        DEFINE_WAIT(wait);

        /* Run any workqueue-creation work still pending on keventd. */
        flush_scheduled_work();

        /*
         * The flush above has completed any scheduled creation work, so
         * this wait only triggers, and then times out, when the private
         * workqueue could not be created.
         */
        prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
        if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
                timeout = schedule_timeout(HZ * 2);
                if (!timeout && !dev->cn_queue)
                        WARN_ON(1);
        }
        finish_wait(&dev->wq_created, &wait);

        if (dev->cn_queue) {
                flush_workqueue(dev->cn_queue);
                destroy_workqueue(dev->cn_queue);
        }

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));
                msleep(1000);
        }

        kfree(dev);
}