/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to run in parallel,
while these operations still have their externally visible parts
happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie
and pass this to the called functions.

The asynchronously called function should, before doing a globally
visible operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all
asynchronous operations that were scheduled prior to the operation
corresponding with the cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers or
subsystems that do not use the asynchronous call feature, needs to do
a full synchronization with the async_synchronize_full() function
before returning from its init function. This is to maintain strict
ordering between the asynchronous and synchronous parts of the kernel.

*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	/*
	 * Use the right list member for each list: entries hang off
	 * domain->pending via ->domain_list but off async_global_pending
	 * via ->global_list, so the container_of() member must match or
	 * we read a cookie from the wrong offset inside the entry.
	 */
	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_t func, void *data)
{
	return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
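
/*
 * Example (an illustrative sketch, not part of this file's API): per the
 * theory-of-operation notes above, the usual pattern is for the callback
 * to checkpoint on its own cookie before its globally visible step, so
 * the slow work runs in parallel while the visible part retires in-order.
 * my_dev, my_slow_hw_init() and my_register_dev() are hypothetical
 * driver helpers:
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_slow_hw_init(dev);
 *		async_synchronize_cookie(cookie);
 *		my_register_dev(dev);
 *	}
 *
 *	async_schedule(my_probe_one, dev);
 */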

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_t func, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
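
/*
 * Example (an illustrative sketch): confining related jobs to a private
 * domain lets a subsystem wait for just its own work rather than for
 * every async job in the system. my_domain and my_init_one() are
 * hypothetical names:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	for (i = 0; i < n; i++)
 *		async_schedule_domain(my_init_one, &units[i], &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */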

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
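
/*
 * Example (an illustrative sketch): per the "Goals and Theory of
 * Operation" notes above, init code that scheduled async work but shares
 * global resources with non-async code should drain everything before
 * returning. my_probe_one() and my_device are hypothetical:
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe_one, &my_device);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */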

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
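
/*
 * Example (an illustrative sketch): a domain declared with ASYNC_DOMAIN()
 * is registered, so anonymous async_synchronize_full() callers may wait
 * on it. If the domain object is about to go away (e.g. module exit),
 * drain it and then unregister it; the WARN_ON() above fires if entries
 * are still pending. ASYNC_DOMAIN_EXCLUSIVE() avoids the need to flush:
 *
 *	async_synchronize_full_domain(&my_domain);
 *	async_unregister_domain(&my_domain);
 */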

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async cookie to synchronize to
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async cookie to synchronize to
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
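
/*
 * Example (an illustrative sketch): waiting on a returned cookie waits
 * for everything scheduled *before* that job, not the job itself, since
 * lowest_in_progress() only has to reach @cookie. stage_one() and
 * stage_two() are hypothetical callbacks:
 *
 *	async_cookie_t c1, c2;
 *
 *	c1 = async_schedule(stage_one, data);
 *	c2 = async_schedule(stage_two, data);
 *	async_synchronize_cookie(c2);	<-- stage_one done; stage_two may run
 */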

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
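
/*
 * Example (an illustrative sketch): code reachable from both normal
 * tasks and async workers can use this to avoid self-deadlock, since an
 * async worker that called async_synchronize_full() would wait for its
 * own still-pending entry:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */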