// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"

/* Max dispatch from a group in one round of service tree */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round of service tree */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* ns */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency mostly comes from sequential IO.  Such IO says
 * nothing about whether the IO is being impacted by other IO, so filter it
 * out.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
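
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */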
struct throtl_qnode {
	struct list_head node;		/* service_queue->queued[] */
	struct bio_list bios;		/* queued bios */
	struct throtl_grp *tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached pending_tree;	/* RB tree of active tgs */
	unsigned int nr_pending;	/* # queued in the tree */
	unsigned long first_pending_disptime;	/* disptime of the first tg */
	struct timer_list pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group is ready to dispatch more bios.  It is used as the
	 * scheduling key to decide dispatch order.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes-per-second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};

/* We measure latency for request sizes from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}
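
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */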
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}
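
/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */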
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
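
/*
 * A cgroup's limit under LIMIT_MAX is scaled if a low limit is set, which
 * makes the IO dispatch smoother:
 * Scale up: linearly with the time elapsed since the last upgrade, adding
 *           half of the .low limit per throtl_slice until the .max limit
 *           is reached.
 * Scale down: exponentially, whenever a cgroup stops hitting its .low
 *           limit (see throtl_downgrade_state()).
 */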
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary limit to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
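
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */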
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector in case bio_sectors() returns 0 */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}
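
/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */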
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}
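
/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */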
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}
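
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */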
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue; its lifetime is managed by the caller */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
						struct request_queue *q,
						struct blkcg *blkcg)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;

	/* LIMIT_LOW will have default value 0 */
	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}
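
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */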
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
#else
static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
{
}
#endif

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}
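
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */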
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					 unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slice don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  u32 iops_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	if (iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */
	tmp = (u64)iops_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 u64 bps_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	if (bps_limit == U64_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = bps_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
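
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */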
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio to be throttled
	 * more than once as a throttled bio will go through blk-throtl the
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}
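
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */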
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);
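
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_queue is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */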
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(&q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}
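
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data to be issued.
 */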
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			submit_bio_noacct(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);

		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * Make sure all children have a lower idle time threshold
		 * and a higher latency target than their parent.
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limit are dropped suddenly and we don't want to
	 * account recently dispatched IO with new low rate.
	 */
	throtl_start_new_slice(tg, READ);
	throtl_start_new_slice(tg, WRITE);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
				  &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat_recursive,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps") && val > 1)
			v[0] = val;
		else if (!strcmp(tok, "wbps") && val > 1)
			v[1] = val;
		else if (!strcmp(tok, "riops") && val > 1)
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops") && val > 1)
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have a low limit, so it always reaches
		 * the low limit.  Its overflow time is useless for children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - single idle is too long, longer than a fixed value (in case user
	 *   configures a too big threshold) or 4 times of idletime threshold
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * If cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit.
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	      __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = LIMIT_LOW;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If cgroup is below low limit, consider downgrade and throttle
	 * other cgroups.
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the whole hierarchy is below its low limit, downgrade so the
	 * other cgroups get throttled again.
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now;
	unsigned long last_finish_time = tg->last_finish_time;

	if (last_finish_time == 0)
		return;

	now = ktime_get_ns() >> 10;
	if (now <= last_finish_time ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif

bool blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED))
		goto out;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	if (!tg->has_rules[rw])
		goto out;

	spin_lock_irq(&q->queue_lock);

	throtl_update_latency_buckets(td);

	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(&q->queue_lock);
out:
	bio_set_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	rcu_read_unlock();
	return throttled;
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
				 int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[op]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[op]);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
			     time_ns >> 10);
}

void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);
	if (!tg->td->limit_valid[LIMIT_LOW])
		return;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, could get wrong count, which means cgroups
		 * will be throttled slightly, which is ok.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}

void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
				     const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);