1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51#include <linux/async.h>
52#include <linux/module.h>
53#include <linux/wait.h>
54#include <linux/sched.h>
55#include <linux/slab.h>
56#include <linux/workqueue.h>
57#include <asm/atomic.h>
58
59static async_cookie_t next_cookie = 1;
60
61#define MAX_WORK 32768
62
63static LIST_HEAD(async_pending);
64static LIST_HEAD(async_running);
65static DEFINE_SPINLOCK(async_lock);
66
/* One scheduled asynchronous function call. */
struct async_entry {
	struct list_head list;		/* link: on async_pending, then on @running */
	struct work_struct work;	/* workqueue item that executes this entry */
	async_cookie_t cookie;		/* ordering cookie taken from next_cookie */
	async_func_ptr *func;		/* function to call */
	void *data;			/* argument passed to @func */
	struct list_head *running;	/* running list of this entry's domain */
};
75
76static DECLARE_WAIT_QUEUE_HEAD(async_done);
77
78static atomic_t entry_count;
79
80extern int initcall_debug;
81
82
83
84
85
86static async_cookie_t __lowest_in_progress(struct list_head *running)
87{
88 struct async_entry *entry;
89
90 if (!list_empty(running)) {
91 entry = list_first_entry(running,
92 struct async_entry, list);
93 return entry->cookie;
94 }
95
96 list_for_each_entry(entry, &async_pending, list)
97 if (entry->running == running)
98 return entry->cookie;
99
100 return next_cookie;
101}
102
103static async_cookie_t lowest_in_progress(struct list_head *running)
104{
105 unsigned long flags;
106 async_cookie_t ret;
107
108 spin_lock_irqsave(&async_lock, flags);
109 ret = __lowest_in_progress(running);
110 spin_unlock_irqrestore(&async_lock, flags);
111 return ret;
112}
113
114
115
116
/*
 * Worker callback: pick up one async_entry, run its function, and
 * retire the entry.  The list moves below are what keep the pending
 * and running lists in the state __lowest_in_progress() expects.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime, delta, rettime;

	/* 1) move self from the pending list to this domain's running list */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run the function (timing it when boot-time initcall_debug is on) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 approximates ns-to-usec without a division */
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running list */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 4) free the entry while still under the lock */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any synchronizers waiting on async_done */
	wake_up(&async_done);
}
158
/*
 * Queue @ptr for asynchronous execution in the domain given by @running
 * and return its ordering cookie.
 *
 * Uses GFP_ATOMIC so callers may be in atomic context.  If allocation
 * fails, or more than MAX_WORK entries are already in flight, the
 * function is simply executed synchronously right here; a valid cookie
 * is returned either way.
 */
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* GFP_ATOMIC: must not sleep on behalf of atomic callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * Out of memory or too much work pending: fall back to running
	 * the function synchronously.  A cookie is still allocated under
	 * the lock so ordering bookkeeping stays consistent.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);	/* kfree(NULL) is a no-op */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* synchronous execution in the caller's context */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	/* allocate the cookie and publish the entry atomically */
	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* hand off to the unbound workqueue for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
198
199
200
201
202
203
204
205
206
207async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
208{
209 return __async_schedule(ptr, data, &async_running);
210}
211EXPORT_SYMBOL_GPL(async_schedule);
212
213
214
215
216
217
218
219
220
221
222
223
224
225async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
226 struct list_head *running)
227{
228 return __async_schedule(ptr, data, running);
229}
230EXPORT_SYMBOL_GPL(async_schedule_domain);
231
232
233
234
235
236
237void async_synchronize_full(void)
238{
239 do {
240 async_synchronize_cookie(next_cookie);
241 } while (!list_empty(&async_running) || !list_empty(&async_pending));
242}
243EXPORT_SYMBOL_GPL(async_synchronize_full);
244
245
246
247
248
249
250
251
252void async_synchronize_full_domain(struct list_head *list)
253{
254 async_synchronize_cookie_domain(next_cookie, list);
255}
256EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
257
258
259
260
261
262
263
264
265
266
267void async_synchronize_cookie_domain(async_cookie_t cookie,
268 struct list_head *running)
269{
270 ktime_t starttime, delta, endtime;
271
272 if (initcall_debug && system_state == SYSTEM_BOOTING) {
273 printk("async_waiting @ %i\n", task_pid_nr(current));
274 starttime = ktime_get();
275 }
276
277 wait_event(async_done, lowest_in_progress(running) >= cookie);
278
279 if (initcall_debug && system_state == SYSTEM_BOOTING) {
280 endtime = ktime_get();
281 delta = ktime_sub(endtime, starttime);
282
283 printk("async_continuing @ %i after %lli usec\n",
284 task_pid_nr(current),
285 (long long)ktime_to_ns(delta) >> 10);
286 }
287}
288EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
289
290
291
292
293
294
295
296
297void async_synchronize_cookie(async_cookie_t cookie)
298{
299 async_synchronize_cookie_domain(cookie, &async_running);
300}
301EXPORT_SYMBOL_GPL(async_synchronize_cookie);
302