// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should before doing a globally visible
operation, such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, need to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/

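/*
 * To make the theory above concrete, a minimal usage sketch (the
 * my_dev_* names are hypothetical and not part of this file or of any
 * kernel API): the slow hardware init runs concurrently with other
 * async work, while the globally visible registration is forced into
 * submission order by checkpointing on the entry's own cookie:
 *
 *	static void my_dev_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_slow_hw_init(dev);
 *		async_synchronize_cookie(cookie);
 *		my_dev_register(dev);
 *	}
 *
 *	async_schedule(my_dev_probe_async, dev);
 */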
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX

static LIST_HEAD(async_global_pending);
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;	/* approx */
}

/*
 * Return the cookie of the oldest pending entry in @domain, or across
 * all registered domains when @domain is NULL. Returns ASYNC_COOKIE_MAX
 * when nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
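
/*
 * Worked example of the synchronization invariant built on
 * lowest_in_progress(): with entries 3, 5 and 7 still pending, it
 * returns 3, so a waiter on cookie 5 sleeps in the wait_event() inside
 * async_synchronize_cookie_domain(). Once everything older than 5 has
 * completed, the lowest pending cookie is >= 5 and the waiter proceeds.
 * With nothing pending it returns ASYNC_COOKIE_MAX, which satisfies
 * every waiter, including async_synchronize_full().
 */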

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq context to call async_schedule*() */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
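
/*
 * Domain-scoped sketch (hypothetical; my_domain and my_probe are
 * illustrative names, and NUMA_NO_NODE leaves node placement to the
 * workqueue): keeping a driver's async work in a private domain lets
 * it later wait for just that work rather than for all async activity
 * system-wide:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_node_domain(my_probe, dev, NUMA_NO_NODE, &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */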

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
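
/*
 * Usage sketch (hypothetical): code that can run both synchronously and
 * from an async worker may branch on current_is_async(), e.g. to skip a
 * full synchronization that would deadlock when called from within the
 * async machinery itself (the caller's own entry stays pending until
 * its function returns):
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */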