/*
 * async.c: Asynchronous function calls for boot performance
 *
 * Functions scheduled here run on a pool of kernel worker threads;
 * callers can checkpoint against a returned cookie to wait for
 * completion of all earlier-scheduled calls.
 */
51#include <linux/async.h>
52#include <linux/bug.h>
53#include <linux/module.h>
54#include <linux/wait.h>
55#include <linux/sched.h>
56#include <linux/init.h>
57#include <linux/kthread.h>
58#include <linux/delay.h>
59#include <asm/atomic.h>
60
/*
 * Cookie handed to the next scheduled entry; monotonically increasing.
 * Writes are serialized by async_lock; the synchronize paths read it
 * without the lock as a racy-but-harmless snapshot.
 */
static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256	/* cap on worker threads spawned by the manager */
#define MAX_WORK	32768	/* backlog cap; above this, calls run synchronously */

static LIST_HEAD(async_pending);	/* entries not yet picked up by a worker */
static LIST_HEAD(async_running);	/* default domain: entries currently executing */
static DEFINE_SPINLOCK(async_lock);	/* protects both lists and next_cookie */

/* Set once the manager thread is up; until then, and on allocation
 * failure, scheduled functions run synchronously in the caller. */
static int async_enabled = 0;

/* One scheduled asynchronous function call. */
struct async_entry {
	struct list_head list;		/* on async_pending, then on *running */
	async_cookie_t cookie;		/* ordering ticket for synchronization */
	async_func_ptr *func;		/* function to invoke */
	void *data;			/* opaque argument passed to func */
	struct list_head *running;	/* domain list this entry runs on */
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);	/* woken when an entry completes */
static DECLARE_WAIT_QUEUE_HEAD(async_new);	/* woken when new work is queued */

static atomic_t entry_count;	/* queued + running entries */
static atomic_t thread_count;	/* live worker threads */

extern int initcall_debug;
87
88
89
90
91
92static async_cookie_t __lowest_in_progress(struct list_head *running)
93{
94 struct async_entry *entry;
95
96 if (!list_empty(running)) {
97 entry = list_first_entry(running,
98 struct async_entry, list);
99 return entry->cookie;
100 }
101
102 list_for_each_entry(entry, &async_pending, list)
103 if (entry->running == running)
104 return entry->cookie;
105
106 return next_cookie;
107}
108
109static async_cookie_t lowest_in_progress(struct list_head *running)
110{
111 unsigned long flags;
112 async_cookie_t ret;
113
114 spin_lock_irqsave(&async_lock, flags);
115 ret = __lowest_in_progress(running);
116 spin_unlock_irqrestore(&async_lock, flags);
117 return ret;
118}
119
120
121
122static void run_one_entry(void)
123{
124 unsigned long flags;
125 struct async_entry *entry;
126 ktime_t calltime, delta, rettime;
127
128
129
130 spin_lock_irqsave(&async_lock, flags);
131 if (list_empty(&async_pending))
132 goto out;
133 entry = list_first_entry(&async_pending, struct async_entry, list);
134
135
136 list_move_tail(&entry->list, entry->running);
137 spin_unlock_irqrestore(&async_lock, flags);
138
139
140 if (initcall_debug && system_state == SYSTEM_BOOTING) {
141 printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
142 entry->func, task_pid_nr(current));
143 calltime = ktime_get();
144 }
145 entry->func(entry->data, entry->cookie);
146 if (initcall_debug && system_state == SYSTEM_BOOTING) {
147 rettime = ktime_get();
148 delta = ktime_sub(rettime, calltime);
149 printk("initcall %lli_%pF returned 0 after %lld usecs\n",
150 (long long)entry->cookie,
151 entry->func,
152 (long long)ktime_to_ns(delta) >> 10);
153 }
154
155
156 spin_lock_irqsave(&async_lock, flags);
157 list_del(&entry->list);
158
159
160 kfree(entry);
161 atomic_dec(&entry_count);
162
163 spin_unlock_irqrestore(&async_lock, flags);
164
165
166 wake_up(&async_done);
167 return;
168
169out:
170 spin_unlock_irqrestore(&async_lock, flags);
171}
172
173
/*
 * Queue @ptr for asynchronous execution on @running's domain, or run it
 * synchronously in the caller's context when async is disabled, the
 * allocation fails, or the backlog exceeds MAX_WORK.  Returns the
 * cookie assigned to this call.
 */
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* GFP_ATOMIC: callers may be in atomic context */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work pending
	 * already, fall back to synchronous execution right here.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);	/* kfree(NULL) is a no-op */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* run synchronously, still consuming a cookie */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	/* poke a sleeping worker (and the manager) to pick this up */
	wake_up(&async_new);
	return newcookie;
}
210
211
212
213
214
215
216
217
218
/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * May be called from atomic context (the allocation uses GFP_ATOMIC).
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
224
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * async_schedule_domain - schedule a function within a synchronization domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list to use as the synchronization domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be passed to the async_synchronize_*_domain() functions
 * to wait on this domain only, rather than on all async work globally.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
243
244
245
246
247
248
/**
 * async_synchronize_full - synchronize all outstanding asynchronous calls
 *
 * Waits until all asynchronous function calls in the default domain
 * have completed.  Loops because new work may be scheduled while we
 * wait; the unlocked list checks are re-validated by the next
 * synchronize pass.
 */
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
256
257
258
259
260
261
262
263
/**
 * async_synchronize_full_domain - synchronize all calls within one domain
 * @list: running list of the domain to synchronize against
 *
 * Waits until all asynchronous function calls in @list's domain have
 * completed (everything scheduled before the current next_cookie).
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
269
270
271
272
273
274
275
276
277
278
279void async_synchronize_cookie_domain(async_cookie_t cookie,
280 struct list_head *running)
281{
282 ktime_t starttime, delta, endtime;
283
284 if (initcall_debug && system_state == SYSTEM_BOOTING) {
285 printk("async_waiting @ %i\n", task_pid_nr(current));
286 starttime = ktime_get();
287 }
288
289 wait_event(async_done, lowest_in_progress(running) >= cookie);
290
291 if (initcall_debug && system_state == SYSTEM_BOOTING) {
292 endtime = ktime_get();
293 delta = ktime_sub(endtime, starttime);
294
295 printk("async_continuing @ %i after %lli usec\n",
296 task_pid_nr(current),
297 (long long)ktime_to_ns(delta) >> 10);
298 }
299}
300EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
301
302
303
304
305
306
307
308
/**
 * async_synchronize_cookie - synchronize the default domain up to a cookie
 * @cookie: async cookie to synchronize against
 *
 * Waits until all asynchronous calls in the default domain with cookies
 * lower than @cookie have completed.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
314
315
/*
 * Worker thread: runs pending entries, and exits after roughly one
 * second of idleness.  Exit is carefully ordered against the manager
 * thread so that queued work is never orphaned.
 */
static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * Check the list head without the lock: this is racy,
		 * but a false positive is handled inside
		 * run_one_entry() under the lock, and a missed entry
		 * just means we sleep until the next wake_up/timeout.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * The full timeout elapsed with nothing to do,
			 * so this thread is redundant: sign off and
			 * die.  A last-straw re-check of the pending
			 * list avoids racing with work that snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure before the list check */
			if (list_empty(&async_pending))
				break;

			/*
			 * Work arrived between the timeout and signing
			 * off: take the count back and keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}
356
/*
 * Manager thread: woken whenever work is queued, and spawns worker
 * threads (up to MAX_THREADS) until there are at least as many live
 * workers as queued entries.
 */
static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Read thread_count before entry_count; the rmb() pairs
		 * with the wmb() in a worker's exit path so a departing
		 * worker's decrement is seen before we size the pool.
		 */
		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			/* On spawn failure, back off briefly and retry. */
			if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
					       tc))) {
				msleep(100);
				continue;
			}
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}
387
/*
 * Start the manager thread; async execution stays disabled (calls run
 * synchronously) if the thread cannot be created.
 */
static int __init async_init(void)
{
	async_enabled =
		!IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));

	WARN_ON(!async_enabled);
	return 0;
}

core_initcall(async_init);
398