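/*
 * Functions related to io context handling.
 */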
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include "blk.h"
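
/*
 * For io context allocations.
 */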
static struct kmem_cache *iocontext_cachep;
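
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment the reference count of @ioc.  The caller must already hold a
 * reference.
 */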
void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
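
/*
 * RCU callback that frees an icq.  The kmem cache pointer is taken from
 * icq->__rcu_icq_cache, which ioc_destroy_icq() records because the
 * request_queue may already be gone by the time this runs.
 */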
static void icq_free_icq_rcu(struct rcu_head *head)
{
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

        kmem_cache_free(icq->__rcu_icq_cache, icq);
}
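
/*
 * Exit an icq: notify the elevator that the icq is going away.  Called with
 * the ioc lock held; callers on the legacy (sq) path also hold the queue
 * lock.
 */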
static void ioc_exit_icq(struct io_cq *icq)
{
        struct elevator_type_aux *aux = icq->q->elevator->aux;

        if (icq->flags & ICQ_EXITED)
                return;

        if (aux->uses_mq && aux->ops.mq.exit_icq)
                aux->ops.mq.exit_icq(icq);
        else if (!aux->uses_mq && aux->ops.sq.elevator_exit_icq_fn)
                aux->ops.sq.elevator_exit_icq_fn(icq);

        icq->flags |= ICQ_EXITED;
}
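
/*
 * Unlink and release an icq.  Called with icq->ioc locked; callers on the
 * legacy (sq) path also hold the queue lock.
 */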
static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);
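
        /*
         * If the lookup hint still points to @icq, clear it so that no new
         * lookups can return this icq.  Hint assignment itself can race
         * safely.
         */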
        if (rcu_dereference_raw(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        ioc_exit_icq(icq);
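
        /*
         * @icq->q might have gone away by the time the RCU callback runs,
         * making it impossible to determine icq_cache then.  Record it now.
         */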
        icq->__rcu_icq_cache = et->icq_cache;
        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
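
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */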
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
        unsigned long flags;
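
        /*
         * Exiting an icq may call into put_io_context() through the
         * elevator, which would trigger a lockdep warning.  The ioc's are
         * guaranteed to be different, so use a different locking subclass
         * here.  Use the irqsave variant as there is no
         * spin_lock_irq_nested().
         */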
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irqrestore(&ioc->lock, flags);
                        cpu_relax();
                        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
                }
        }

        spin_unlock_irqrestore(&ioc->lock, flags);

        kmem_cache_free(iocontext_cachep, ioc);
}
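
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */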
void put_io_context(struct io_context *ioc)
{
        unsigned long flags;
        bool free_ioc = false;

        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
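
        /*
         * Releasing ioc requires reverse order double locking and we may
         * already be holding a queue_lock.  Do it asynchronously from wq.
         */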
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
                        schedule_work(&ioc->release_work);
                else
                        free_ioc = true;
                spin_unlock_irqrestore(&ioc->lock, flags);
        }

        if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
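
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Drop an active reference to @ioc.  If the active count reaches zero, all
 * icq's attached to @ioc are exited and the regular reference is dropped.
 */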
void put_io_context_active(struct io_context *ioc)
{
        struct elevator_type *et;
        unsigned long flags;
        struct io_cq *icq;

        if (!atomic_dec_and_test(&ioc->active_ref)) {
                put_io_context(ioc);
                return;
        }
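
        /*
         * Need the ioc lock for the icq list.  Same reverse-order double
         * locking as in ioc_release_fn(); see the comment there for the
         * nested locking annotation.
         */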
retry:
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;

                et = icq->q->elevator->type;
                if (icq->q->elevator->aux->uses_mq) {
                        ioc_exit_icq(icq);
                } else {
                        if (spin_trylock(icq->q->queue_lock)) {
                                ioc_exit_icq(icq);
                                spin_unlock(icq->q->queue_lock);
                        } else {
                                spin_unlock_irqrestore(&ioc->lock, flags);
                                cpu_relax();
                                goto retry;
                        }
                }
        }
        spin_unlock_irqrestore(&ioc->lock, flags);

        put_io_context(ioc);
}
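
/* Called by the exiting task to detach and release its io_context. */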
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        atomic_dec(&ioc->nr_tasks);
        put_io_context_active(ioc);
}
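
/* Destroy all icq's on @icq_list, taking each owning ioc's lock in turn. */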
static void __ioc_clear_queue(struct list_head *icq_list)
{
        unsigned long flags;

        while (!list_empty(icq_list)) {
                struct io_cq *icq = list_entry(icq_list->next,
                                               struct io_cq, q_node);
                struct io_context *ioc = icq->ioc;

                spin_lock_irqsave(&ioc->lock, flags);
                ioc_destroy_icq(icq);
                spin_unlock_irqrestore(&ioc->lock, flags);
        }
}
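
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */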
void ioc_clear_queue(struct request_queue *q)
{
        LIST_HEAD(icq_list);

        spin_lock_irq(q->queue_lock);
        list_splice_init(&q->icq_list, &icq_list);

        if (q->mq_ops) {
                spin_unlock_irq(q->queue_lock);
                __ioc_clear_queue(&icq_list);
        } else {
                __ioc_clear_queue(&icq_list);
                spin_unlock_irq(q->queue_lock);
        }
}
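
/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task to attach the io_context to
 * @gfp_flags: allocation flags
 * @node: NUMA node to allocate from
 *
 * Returns 0 if @task ends up with an io_context (newly installed, or one
 * raced in by somebody else), -ENOMEM on allocation failure, or -EBUSY if
 * @task is exiting and no io_context can be installed.
 */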
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
        struct io_context *ioc;
        int ret;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return -ENOMEM;
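
        /* initialize */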
        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->nr_tasks, 1);
        atomic_set(&ioc->active_ref, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);
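
        /*
         * Try to install.  Don't install if someone else already did, or if
         * @task, which isn't %current, is already exiting.  %current is
         * still allowed to install while exiting.
         */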
        task_lock(task);
        if (!task->io_context &&
            (task == current || !(task->flags & PF_EXITING)))
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);

        ret = task->io_context ? 0 : -EBUSY;

        task_unlock(task);

        return ret;
}
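
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return the io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock(); for %current it is cheaper
 * to use %current->io_context together with get_io_context().
 */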
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        do {
                task_lock(task);
                ioc = task->io_context;
                if (likely(ioc)) {
                        get_io_context(ioc);
                        task_unlock(task);
                        return ioc;
                }
                task_unlock(task);
        } while (!create_task_io_context(task, gfp_flags, node));

        return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
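
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair.  Must be called
 * with @q->queue_lock held.
 */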
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
        struct io_cq *icq;

        lockdep_assert_held(q->queue_lock);
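
        /*
         * icq's are indexed from @ioc using a radix tree and a lookup hint,
         * both protected by RCU.  Check the hint first; on a miss fall back
         * to the radix tree and refresh the hint.
         */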
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq);
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
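
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't exist,
 * it is created using @gfp_mask.
 *
 * The caller is responsible for ensuring that @ioc won't go away and that
 * @q stays alive until this function returns.
 */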
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask)
{
        struct elevator_type *et = q->elevator->type;
        struct elevator_type_aux *aux = q->elevator->aux;
        struct io_cq *icq;
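
        /* allocate the icq and preload the radix tree */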
        icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_maybe_preload(gfp_mask) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);
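
        /* lock both q and ioc and try to link @icq */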
        spin_lock_irq(q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (aux->uses_mq && aux->ops.mq.init_icq)
                        aux->ops.mq.init_icq(icq);
                else if (!aux->uses_mq && aux->ops.sq.elevator_init_icq_fn)
                        aux->ops.sq.elevator_init_icq_fn(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(ioc, q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(q->queue_lock);
        radix_tree_preload_end();
        return icq;
}
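
/* Create the slab cache backing io_context allocations during early boot. */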
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);