/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should before doing a globally visible
operation, such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, need to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct list_head	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Entries may sit on either list, and the workqueue can start them
	 * out of order, so neither list alone is guaranteed to hold the
	 * lowest cookie.  Both lists are kept sorted by cookie (pending by
	 * construction, running by the sorted insert in
	 * async_run_entry_fn()), so take the first cookie from each and
	 * return the smaller one.
	 */
	if (!list_empty(running)) {
		entry = list_first_entry(running,
					 struct async_entry, list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->running == running) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
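
/*
 * Worked example (illustrative, not part of the original source): if
 * cookies 1 and 2 have completed, cookie 3 sits on the running list and
 * cookie 4 is still on async_pending for the same domain, then
 * lowest_in_progress() returns min(3, 4) = 3.  A waiter that passed
 * cookie 3 may proceed, since everything scheduled before it is done,
 * while a waiter that passed cookie 4 keeps sleeping until 3 completes.
 */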

/*
 * Run a single async entry; executed from the workqueue.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/*
	 * 1) move self to the running queue; insert in cookie order, since
	 * the workqueue may start entries out of order and the running
	 * list must stay sorted for __lowest_in_progress()
	 */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, entry->running, list)
		if (pos->cookie < entry->cookie)
			break;
	list_move(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
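
/*
 * Usage sketch (hypothetical caller, not part of this file; "my_dev_probe",
 * "my_dev" and "my_dev_slow_hardware_init" are made-up names): a driver can
 * push a slow probe into the async pool so the rest of boot continues in
 * parallel:
 *
 *	static void my_dev_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_slow_hardware_init(dev);
 *	}
 *
 *	async_schedule(my_dev_probe, dev);
 */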

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.  A
 * synchronization domain is specified via the running queue @running to use.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
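
/*
 * Usage sketch (hypothetical, not part of this file; "my_domain" is a
 * made-up name): keeping a subsystem's async work in a private domain lets
 * it wait for its own work without also draining unrelated global entries:
 *
 *	static LIST_HEAD(my_domain);
 *
 *	async_schedule_domain(my_dev_probe, dev, &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */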

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
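
/*
 * Usage sketch (hypothetical): as the header comment above prescribes,
 * init code that scheduled async probes but shares global resources with
 * non-async code should drain everything before returning:
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		async_schedule(my_dev_probe, &my_dev);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */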

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
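
/*
 * Usage sketch (hypothetical): an async function that must make its
 * externally visible effects in schedule order can checkpoint on its own
 * cookie first, matching the "retire in order" model from the header:
 *
 *	static void my_async_probe(void *data, async_cookie_t cookie)
 *	{
 *		my_slow_scan(data);			(runs out of order)
 *
 *		async_synchronize_cookie(cookie);
 *		my_register_device_numbers(data);	(strictly in order)
 *	}
 */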