/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to schedule a
function call in an "asynchronous" manner, but allow the caller to
only wait for the completion of the function call (or all scheduled
asynchronous function calls) at a later time if needed.

To facilitate this, a "sequence cookie" is assigned to every scheduled
asynchronous function call; the caller can later use this cookie (via
the async_synchronize_cookie*() family) to wait until all function
calls scheduled prior to that cookie have completed.

Synchronization can be done globally or within a synchronization
"domain", so that independent subsystems need not wait on each other.

*/
51#include <linux/async.h>
52#include <linux/atomic.h>
53#include <linux/ktime.h>
54#include <linux/export.h>
55#include <linux/wait.h>
56#include <linux/sched.h>
57#include <linux/slab.h>
58#include <linux/workqueue.h>
59
60#include "workqueue_internal.h"
61
/* next cookie to hand out; monotonically increasing, protected by async_lock */
static async_cookie_t next_cookie = 1;

/* above this many outstanding entries, new calls run synchronously */
#define MAX_WORK 32768
/* "infinity" cookie: larger than any cookie ever handed out */
#define ASYNC_COOKIE_MAX ULLONG_MAX

/* pending entries across all registered domains, oldest (lowest cookie) first */
static LIST_HEAD(async_global_pending);
/* default domain used by async_schedule() */
static ASYNC_DOMAIN(async_dfl_domain);
/* protects next_cookie and both pending lists */
static DEFINE_SPINLOCK(async_lock);

/*
 * One scheduled asynchronous call. Allocated in __async_schedule() and
 * freed by async_run_entry_fn() once the call has completed.
 */
struct async_entry {
	struct list_head domain_list;	/* link on domain->pending */
	struct list_head global_list;	/* link on async_global_pending */
	struct work_struct work;	/* workqueue item that runs the call */
	async_cookie_t cookie;		/* ordering cookie from next_cookie */
	async_func_t func;		/* function to invoke */
	void *data;			/* argument handed to func */
	struct async_domain *domain;	/* owning synchronization domain */
};

/* woken after each entry completes; synchronizers sleep here */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* number of allocated but not yet completed entries */
static atomic_t entry_count;
84
85static async_cookie_t lowest_in_progress(struct async_domain *domain)
86{
87 struct async_entry *first = NULL;
88 async_cookie_t ret = ASYNC_COOKIE_MAX;
89 unsigned long flags;
90
91 spin_lock_irqsave(&async_lock, flags);
92
93 if (domain) {
94 if (!list_empty(&domain->pending))
95 first = list_first_entry(&domain->pending,
96 struct async_entry, domain_list);
97 } else {
98 if (!list_empty(&async_global_pending))
99 first = list_first_entry(&async_global_pending,
100 struct async_entry, global_list);
101 }
102
103 if (first)
104 ret = first->cookie;
105
106 spin_unlock_irqrestore(&async_lock, flags);
107 return ret;
108}
109
110
111
112
/*
 * Workqueue callback: run one queued async entry, then retire it.
 *
 * Invokes entry->func(data, cookie), unlinks the entry from both pending
 * lists under async_lock, frees it, and finally wakes async_done so that
 * synchronizers can re-evaluate lowest_in_progress().
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration when initcall debugging is on) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling %lli_%pF @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry (still under the lock, so waiters never see it) */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters sleeping in async_synchronize_cookie_domain() */
	wake_up(&async_done);
}
151
/*
 * Allocate an async_entry for (func, data), assign it the next cookie,
 * queue it on @domain's pending list (and the global list when the domain
 * is registered), and hand it to the unbound workqueue for execution.
 *
 * If allocation fails or too much work (> MAX_WORK) is already pending,
 * the call is executed synchronously instead — the caller cannot tell
 * the difference apart from latency.  Returns the assigned cookie.
 */
static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* GFP_ATOMIC: callers may be in atomic context */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);	/* kfree(NULL) is a no-op */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue; tail insertion keeps lists cookie-ordered */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
202
203
204
205
206
207
208
209
210
/**
 * async_schedule - schedule a function for asynchronous execution
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_t func, void *data)
{
	return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
216
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the synchronization domain to schedule within
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_t func, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
235
236
237
238
239
240
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
246
247
248
249
250
251
252
253
254
255
/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * Clears @domain->registered so that future entries in this domain are no
 * longer placed on the global pending list (and thus are invisible to
 * domain-less synchronizers).  Warns if the domain was not registered or
 * still has pending work — callers must drain the domain first.
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
264
265
266
267
268
269
270
271
/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
277
278
279
280
281
282
283
284
285
286
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.  It sleeps on async_done until lowest_in_progress()
 * reports no pending entry with a cookie below @cookie.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
308
309
310
311
312
313
314
315
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls in the default
 * domain submitted prior to @cookie have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
321
322
323
324
325
326
327bool current_is_async(void)
328{
329 struct worker *worker = current_wq_worker();
330
331 return worker && worker->current_func == async_run_entry_fn;
332}
333EXPORT_SYMBOL_GPL(current_is_async);
334