/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should before doing a globally visible
operation, such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, need to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
	async_cookie_t first_running = next_cookie;	/* infinity value */
	async_cookie_t first_pending = next_cookie;	/* ditto */
	struct async_entry *entry;

	/*
	 * Both running and pending lists are sorted but not disjoint.
	 * Take the first cookies from both and return the min.
	 */
	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain, typeof(*entry), list);
		first_running = entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list) {
		if (entry->running == running) {
			first_pending = entry->cookie;
			break;
		}
	}

	return min(first_running, first_pending);
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	struct async_entry *pos;
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue, make sure it stays sorted */
	spin_lock_irqsave(&async_lock, flags);
	list_for_each_entry_reverse(pos, &running->domain, list)
		if (entry->cookie < pos->cookie)
			break;
	list_move_tail(&entry->list, &pos->list);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
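
/*
 * Illustrative usage sketch, not part of the original file: a driver can
 * schedule several independent slow probes and then wait for them all
 * before returning from its init function. probe_one(), my_devices[],
 * NR_MY_DEVS and slow_hardware_init() are hypothetical names.
 *
 *	static void probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		slow_hardware_init(dev);
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NR_MY_DEVS; i++)
 *			async_schedule(probe_one, &my_devices[i]);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */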

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_domain() functions
 * to wait within a certain synchronization domain rather than globally.
 * A synchronization domain is specified via the running queue @running to use.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
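
/*
 * Illustrative usage sketch, not part of the original file: a subsystem
 * can confine its async work to a private domain so that waiting on it
 * does not also wait for unrelated async work elsewhere in the kernel.
 * scan_domain and scan_port() are hypothetical names.
 *
 *	static ASYNC_DOMAIN(scan_domain);
 *
 *	async_schedule_domain(scan_port, port, &scan_domain);
 *	...
 *	async_synchronize_full_domain(&scan_domain);
 */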

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
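
/*
 * Illustrative sketch, not part of the original file: as the kernel-doc
 * above suggests, a domain whose waiters are all known to its owner can
 * be declared with ASYNC_DOMAIN_EXCLUSIVE(). Such a domain starts out
 * unregistered, so it is never put on the async_domains list,
 * async_synchronize_full() never waits on it, and no unregistration is
 * needed. local_domain and fn() are hypothetical names.
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(local_domain);
 *
 *	async_schedule_domain(fn, data, &local_domain);
 *	async_synchronize_full_domain(&local_domain);
 */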

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
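
/*
 * Illustrative usage sketch, not part of the original file: an async
 * function passes its own cookie to async_synchronize_cookie() so that
 * its globally visible step happens in submission order even though the
 * slow part ran out of order, as described in the theory-of-operation
 * comment at the top of this file. probe_disk(), spin_up() and
 * register_disk_node() are hypothetical names.
 *
 *	static void probe_disk(void *data, async_cookie_t cookie)
 *	{
 *		struct disk *d = data;
 *
 *		spin_up(d);
 *		async_synchronize_cookie(cookie);
 *		register_disk_node(d);
 *	}
 */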