// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}
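
/*
 * Illustrative sketch (not part of this file): an elevator opts into icq
 * management by setting icq_size/icq_align and the init_icq/exit_icq hooks
 * in its struct elevator_type, as bfq does.  The names example_icq,
 * example_init_icq and example_exit_icq below are hypothetical.
 *
 *	static struct elevator_type example_iosched = {
 *		.ops = {
 *			.init_icq	= example_init_icq,
 *			.exit_icq	= example_exit_icq,
 *			...
 *		},
 *		.icq_size	= sizeof(struct example_icq),
 *		.icq_align	= __alignof__(struct example_icq),
 *		...
 *	};
 *
 * ioc_exit_icq() above then invokes example_exit_icq() at most once per
 * icq, guarded by the ICQ_EXITED flag.
 */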

/*
 * Release an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/*
			 * The usual locking order is queue_lock first,
			 * so back off and retry instead of deadlocking.
			 */
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
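
/*
 * Usage sketch (hypothetical caller, @task assumed): references taken with
 * get_io_context() or get_task_io_context() are dropped with
 * put_io_context().  For example:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		// ... inspect or use ioc ...
 *		put_io_context(ioc);
 *	}
 *
 * The final put may defer icq teardown to a workqueue, which is why it is
 * safe to call while holding a queue_lock.
 */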

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioc_exit_icq() is called on
 * all icqs.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}
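
/*
 * Usage sketch (hypothetical caller): ioc_clear_queue() belongs in queue
 * teardown / elevator switch paths, after the queue has been drained so
 * no new icqs can be linked while q->icq_list is spliced out:
 *
 *	static void example_release_queue(struct request_queue *q)
 *	{
 *		// ... drain and quiesce q ...
 *		ioc_clear_queue(q);
 *		// ... free elevator data ...
 *	}
 */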

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
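
/*
 * Per the comment above, callers that only need %current's io_context can
 * skip task_lock().  A sketch of that fast path (hypothetical caller):
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc)
 *		get_io_context(ioc);
 *	else
 *		ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *
 * This is safe because only %current itself installs or clears its own
 * ->io_context pointer.
 */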

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
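
/*
 * Usage sketch (hypothetical caller): the lookup must run under
 * q->queue_lock, which is what keeps the returned icq from being
 * destroyed underneath us:
 *
 *	struct io_cq *icq;
 *
 *	spin_lock_irq(&q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(&q->queue_lock);
 */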

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
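
/*
 * Usage sketch, modeled on the blk-mq scheduler glue: try a locked lookup
 * first and fall back to creation, which itself resolves the insertion
 * race by returning the icq that won the radix tree insert:
 *
 *	spin_lock_irq(&q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(&q->queue_lock);
 *
 *	if (!icq) {
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 *		if (!icq)
 *			return;	// hypothetical caller gives up
 *	}
 */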

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);