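/*
 * Functions related to io context handling
 */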
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

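/*
 * For io context allocations
 */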
static struct kmem_cache *iocontext_cachep;

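/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */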
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

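/* RCU callback; frees the icq using the cache stashed by ioc_destroy_icq() */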
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

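/*
 * Exit an icq. Called with both ioc and q locked for legacy (sq), with
 * only ioc locked for mq.
 */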
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

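/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and q locked for legacy paths.
 */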
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

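	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */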
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

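	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */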
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

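/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */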
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

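	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */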
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

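/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */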
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

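	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */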
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

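/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioc_exit_icq() is called.
 */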
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

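	/*
	 * Walk icq_list and exit each icq.  Legacy (sq) icqs also need the
	 * queue lock, so use the same trylock/retry dance and nested
	 * locking annotation as ioc_release_fn().
	 */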
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

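/* Called by the exiting task */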
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

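/*
 * Destroy all icqs on @icq_list, taking the owning ioc's lock around each
 * ioc_destroy_icq() call.
 */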
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

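/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */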
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

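/*
 * Allocate a new io_context and try to install it on @task.  Returns 0 if
 * @task ends up with an io_context (either ours or one a racing thread
 * installed), -EBUSY if @task, which isn't %current, is exiting, or
 * -ENOMEM on allocation failure.
 */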
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

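	/* initialize */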
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

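	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that installation on an exiting %current is still allowed as
	 * it may issue IO during exit.
	 */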
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

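/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */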
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

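/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */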
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

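	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we can be sure that the icq
	 * is alive as long as we're holding either lock.
	 */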
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

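/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * kept alive during this function's execution.
 */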
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

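	/* allocate the icq and preload the radix tree */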
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

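	/* lock both q and ioc and try to link @icq */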
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

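/* set up the slab cache for io_contexts at boot */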
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);