/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ.
 */

#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]registration including cgroup file additions / removals.
 * Putting cgroup file registration outside blkcg_pol_mutex allows
 * grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_debug_stats = false;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* fully initialized blkg couldn't be linked; release it normally */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg_lookup_create() should be called under RCU read lock
 * and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dying, returns ERR_PTR(-ENODEV); if @q is not
 * dying but bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
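
/*
 * Example (illustrative sketch, not part of this file): callers look a
 * blkg up, creating it on demand, while holding both the RCU read lock
 * and @q->queue_lock; falling back to the root blkg on error is one
 * common pattern, though individual callers vary:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */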

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
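
/*
 * Example (illustrative sketch, not part of this file): a policy usually
 * builds its cftype ->seq_show from blkcg_print_blkgs() plus a prfill
 * callback like the one below.  All "foo" names are hypothetical.
 *
 *	static u64 foo_prfill(struct seq_file *sf,
 *			      struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, pd_to_foo(pd)->some_count);
 *	}
 *
 *	static int foo_print(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  foo_prfill, &blkcg_policy_foo, 0, false);
 *		return 0;
 *	}
 */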

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
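
/*
 * Example (illustrative sketch, not part of this file): a policy's cftype
 * ->write handler pairs the two helpers above.  The "foo" names are
 * hypothetical.
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		struct blkg_policy_data *pd;
 *		u64 limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &limit) == 1) {
 *			pd = blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
 *			pd_to_foo(pd)->limit = limit;
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */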

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		char *buf;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		/*
		 * scnprintf() returns the number of bytes written, NOT
		 * including the trailing \0.  Advance @off by that count so
		 * the next write starts at the \0.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
		dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (!blkcg_debug_stats)
			goto next;

		if (atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}
next:
		if (has_stats) {
			off += scnprintf(buf+off, size-off, "\n");
			seq_commit(sf, off);
		}
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg
 *    destruction to the completion of writeback associated with the blkcg.
 *    This lets us avoid punting potentially large amounts of outstanding
 *    writeback to root while maintaining any ongoing policies.  The next
 *    stage is triggered when the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be
 *    called.  This work may occur in cgwb_release_workfn() on the
 *    cgwb_release workqueue.  Any submitted ios that fail to get the blkg
 *    ref will be punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called
 *    which finally frees the blkcg.
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base cgwb reference allowing step 2 to be triggered */
	blkcg_cgwb_put(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg
 * lock is nested inside q lock, this function performs reverse double lock
 * dancing.  Destroying the blkgs releases the reference held on the blkcg's
 * css allowing blkcg_css_free() to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else.  Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_iolatency_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
		return ret;
	}

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;

err_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support two
 * tasks with the same ioc in two different groups without major rework of
 * the main cic data structures.  For now we allow a task to change its
 * cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode (or, for blk-mq, is frozen) to populate its blkgs with
 * policy_data for @pol, so nobody is accessing blkgs in the meantime.
 *
 * This function is to be used by blkcg policy implementations and should
 * be called before the policy can be used.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
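
/*
 * Example (illustrative sketch, not part of this file): a policy is
 * typically activated from its per-queue setup hook and deactivated on
 * teardown; the "foo" names are hypothetical.
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 *
 *	static void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	}
 */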

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place and fully registered, add intf files */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
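
/*
 * Example (illustrative sketch, not part of this file): a minimal policy
 * definition as registered from module init; the "foo" names are
 * hypothetical.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */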

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

/*
 * Scale the accumulated delay based on how long it has been since we
 * updated the delay.  We only call this when we are adding delay, in case
 * it's been a while since we added delay, and when we are checking to see
 * if we need to delay a task, to account for any delays that may have
 * occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg
	 * we are ok with whatever is happening now, and we can take away more
	 * of the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract
		 * out min(last_delay, 1 second), but lord knows bugs happen
		 * and we'd rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check
 * to see if we need to throttle, and then actually throttle if there is
 * some accumulated delay.  This should only be called upon return to user
 * space so we're not holding some lock that would induce a priority
 * inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap
	 * the delays at 0.25s.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	/*
	 * Sleep for the requested delay in an IO wait state.  A fatal
	 * signal breaks out of the wait early.
	 */
	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().
 * Obviously we can be set_notify_resume() for reasons other than blkcg
 * throttling, so we check to see if current->throttle_queue is set and if
 * not this doesn't do anything.  This should only ever be called by the
 * resume code, it's not meant to be called by people willy-nilly as it
 * will actually do the work to throttle the task if it is setup for
 * throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	blkg = blkg_try_get(blkg);
	if (!blkg)
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay
 * accumulated for the blkg for this task.  We do not pass the blkg because
 * there are places we call this that may not have that information, the
 * swapping code for instance will only have a request_queue at that point.
 * This sets the notify_resume for the task to check and see if it requires
 * throttling before returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and
 * only throttle once.  If the task needs to be throttled again it'll need
 * to be rescheduled again.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
EXPORT_SYMBOL_GPL(blkcg_add_delay);
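
/*
 * Example (illustrative sketch, not part of this file): a policy that
 * detects a cgroup exceeding its budget can charge delay and arrange for
 * the current task to be throttled on return to user space; the
 * over_budget/penalty_nsec names are hypothetical.
 *
 *	if (over_budget) {
 *		blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), penalty_nsec);
 *		blkcg_schedule_throttle(q, false);
 *	}
 *
 * The accumulated delay is then applied by blkcg_maybe_throttle_current()
 * when the task resumes user space.
 */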

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");