// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation:

The primary goal of this feature is to reduce the kernel boot time, by doing
various independent hardware delays and discovery operations decoupled and
not strictly serialized.

More specifically, the asynchronous function call concept allows certain
operations (primarily during system boot) to happen asynchronously, out of
order, while these operations still have their externally visible parts
happen sequentially and in-order (not unlike how out-of-order CPUs retire
their instructions in order).

Key to the asynchronous function call implementation is the concept of a
"sequence cookie" (which, although it has an abstracted type, can be thought
of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should before doing a globally visible
operation, such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, need to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX

static LIST_HEAD(async_global_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

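/*
 * Return the cookie of the oldest pending entry, either in the given
 * domain or, when @domain is NULL, across all registered domains;
 * ASYNC_COOKIE_MAX if nothing is pending.
 */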
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime, delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling  %lli_%pS @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pS returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
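
/*
 * Illustrative sketch (not part of this file): a driver's probe path
 * might schedule slow discovery work asynchronously and use the sequence
 * cookie to keep externally visible registration in order. The my_hw_*
 * names and struct my_hw are hypothetical; async_schedule() and
 * async_synchronize_cookie() are the real entry points declared in
 * <linux/async.h>.
 */
#if 0
static void my_hw_probe_async(void *data, async_cookie_t cookie)
{
	struct my_hw *hw = data;	/* hypothetical device type */

	my_hw_slow_reset(hw);		/* slow, independent work */

	/* wait for everything scheduled before us, then register in order */
	async_synchronize_cookie(cookie);
	my_hw_register(hw);
}

static int my_hw_probe(struct my_hw *hw)
{
	async_schedule(my_hw_probe_async, hw);
	return 0;
}
#endif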

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
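
/*
 * Illustrative sketch (not part of this file): init code that kicked off
 * asynchronous work but shares global state with synchronous code can
 * flush everything before returning. The my_subsys_* names are
 * hypothetical.
 */
#if 0
static int __init my_subsys_init(void)
{
	my_subsys_schedule_async_probes();	/* hypothetical helper */

	/* keep strict ordering against the synchronous parts of boot */
	async_synchronize_full();
	return 0;
}
#endif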

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushable domains
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
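
/*
 * Illustrative sketch (not part of this file): a subsystem can confine
 * its async work to a private registered domain so it only ever waits on
 * its own jobs, then unregister the (now idle) domain on teardown. An
 * ASYNC_DOMAIN_EXCLUSIVE() domain would never need unregistering, per the
 * kernel-doc above. my_domain and the my_* functions are hypothetical;
 * ASYNC_DOMAIN(), async_schedule_domain() and
 * async_synchronize_full_domain() come from <linux/async.h>.
 */
#if 0
static ASYNC_DOMAIN(my_domain);	/* registered, so flushable globally */

static void my_setup(void)
{
	async_schedule_domain(my_async_fn, NULL, &my_domain);
}

static void my_teardown(void)
{
	/* drain our own jobs, then stop anonymous waiters flushing us */
	async_synchronize_full_domain(&my_domain);
	async_unregister_domain(&my_domain);
}
#endif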

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain pointed to by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain pointed to by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
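
/*
 * Illustrative sketch (not part of this file): a helper that may be
 * reached both from normal context and from inside an async worker can
 * use current_is_async() to avoid synchronizing on the async machinery
 * from within it, since a running entry stays on the pending lists and
 * waiting on it from itself would never complete. my_flush() is
 * hypothetical.
 */
#if 0
static void my_flush(void)
{
	/* don't wait for async jobs while running as one */
	if (!current_is_async())
		async_synchronize_full();
}
#endif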