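/*
 * Interface for controlling IO bandwidth on a request queue.
 *
 * blk-throttle enforces per-cgroup bps and iops limits (io.max, and
 * optionally io.low targets) on bios before they enter the request queue.
 */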

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in one round of dispatch */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round of dispatch */
static int throtl_quantum = 32;

/* Throttling slice defaults: HD (rotational) vs SSD (non-rotational) */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* ~usec, i.e. ns >> 10 */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* ~4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * HD baseline latency is high; filter out latency samples below this
 * cutoff so they don't skew the averages.
 */
#define LATENCY_FILTERED_HD (1000L) /* ~1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
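
/*
 * A throtl_qnode holds bios of one throtl_grp queued on one
 * service_queue list.  Each throtl_grp has a qnode for bios queued on
 * itself and another for bios it dispatches onto its parent's
 * service_queue, so bios climbing the hierarchy stay grouped by their
 * original owner and qnodes can be rotated for round-robin fairness.
 */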
struct throtl_qnode {
	struct list_head node;		/* service_queue->queued[] */
	struct bio_list bios;		/* queued bios */
	struct throtl_grp *tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached pending_tree;	/* RB tree of active tgs */
	unsigned int nr_pending;	/* # queued in the tree */
	unsigned long first_pending_disptime;	/* disptime of the first tg */
	struct timer_list pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio queued while empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
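
/*
 * td->limit_index selects which set of limits is currently enforced:
 * LIMIT_LOW while some cgroup is still below its io.low target, and
 * LIMIT_MAX once every cgroup has met its target or gone idle.
 */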
enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that these bios are dispatched from its own
	 * service_queue.  qnode_on_parent is used when bios are dispatched
	 * from this throtl_grp into its parent's service_queue.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group can dispatch its next bio.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* number of bio's dispatched in the current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set.  This returns
 * an adjusted LIMIT_LOW that grows by half of @low for every throtl_slice
 * elapsed since the last upgrade, so a cgroup ramps back toward its
 * LIMIT_MAX quickly once the device has left the low-limit state.
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary cap to avoid a too-big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume a discard costs one sector worth of bandwidth */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on it.  A
 * qnode pins the owning throtl_grp while on a queued list, so the blkg
 * reference is taken when the qnode is first linked.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  If the qnode becomes
 * empty it is removed from @queued; otherwise it is rotated to the tail
 * for round-robin fairness among qnodes.  Popping the last bio drops the
 * blkg reference the qnode held: when @tg_to_put is %NULL the reference
 * is put here, otherwise the throtl_grp is returned through @tg_to_put
 * and the caller is responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW is left at zero, i.e. no low limit configured */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or its parent has rules.  This doesn't require
 * walking up to the top of the hierarchy as the parent's has_rules[] is
 * guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	if (!parent_sq->nr_pending)
		return NULL;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
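
/*
 * Insert @tg into its parent's pending_tree, keyed by ->disptime, so the
 * cached leftmost node is always the group due to dispatch earliest.
 */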
static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since the throttle limit can change dynamically, a sleep time
	 * computed against a previous limit may be far too long.  Cap the
	 * timer at eight slices so a cgroup never oversleeps a limit
	 * change notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the timer is always armed regardless of whether
 * the dispatch time of the first pending child is in the future or not.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
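
/*
 * Slice accounting: bytes_disp/io_disp accumulate over the current
 * [slice_start, slice_end) window and are compared against the limit
 * prorated over the elapsed window when deciding whether a bio may go.
 */
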
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slice don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is with-in IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio to be throttled
	 * more than once as a throttled bio will go through blk-throtl the
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */
	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
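
/*
 * Walk the pending_tree in dispatch order, dispatching from each group
 * whose disptime has passed, up to throtl_quantum bios per run.
 */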
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_queue is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(&q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio lists of
 * throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);

		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold
		 * and a higher latency target than the parent
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limit are dropped suddenly and we don't want to
	 * account recently dispatched IO with new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}
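
/*
 * Legacy (cgroup v1) knobs take a single u64 per file; writing "0"
 * clears the limit, which is stored internally as the type's max value.
 */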
static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios_recursive,
	},
	{ }
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
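
/*
 * cgroup v2 interface: parse "rbps wbps riops wiops" (plus "idle" and
 * "latency" for io.low) as key=value tokens; the keyword "max" clears a
 * limit.
 */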
static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes = throtl_files,
	.legacy_cftypes = throtl_legacy_files,

	.pd_alloc_fn = throtl_pd_alloc,
	.pd_init_fn = throtl_pd_init,
	.pd_online_fn = throtl_pd_online,
	.pd_offline_fn = throtl_pd_offline,
	.pd_free_fn = throtl_pd_free,
};
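
/*
 * io.low state machine: the device stays in LIMIT_LOW until every cgroup
 * with a low limit has either met it or is idle, then upgrades to
 * LIMIT_MAX; it downgrades again once some cgroup falls back below its
 * low limit.
 */
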
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have a low limit, so it always reaches
		 * it.  Its overflow time is useless for its children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - single idle is too long, longer than a fixed value (in case user
	 *   configures a too big threshold) or 4 times of idletime threshold
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		   "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		   tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		   tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
	    tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	    __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup hasn't hit its low limit for a full slice since the
	 * last upgrade and isn't idle, it's safe to throttle it back down.
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
	    td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}
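
/*
 * Sample the dispatch rate since the last check and record a low-limit
 * overflow timestamp for each direction that met its low limit.
 */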
static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrading so
	 * other cgroups get throttled again.
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	/* exponential moving average of the cgroup's think time */
	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}
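
/*
 * Per-cpu latency samples are folded into td->tmp_buckets and then into
 * a moving average in td->avg_buckets, one bucket per request-size
 * class (see request_bucket_index()).
 */
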
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue))
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif
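
/*
 * blk_throtl_bio - throttle @bio if it is subject to any limits
 *
 * Walks from @tg toward the root, charging @bio at each level it passes
 * within limits.  Returns %true if @bio was queued for delayed dispatch
 * and %false if the caller may issue it immediately.
 */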
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(&q->queue_lock);

	throtl_update_latency_buckets(td);

	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(&q->queue_lock);
out:
	bio_set_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	return throttled;
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
				 int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[op]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[op]);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
}
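
/*
 * Completion hook: record the cgroup's last finish time for think-time
 * tracking and feed this bio's latency into the bucket statistics.
 */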
void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;

	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, could get wrong count, which means cgroups
		 * will be throttled
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not point to any child tg's and all
 * the bios from previously throttled tg's are on @parent_sq's bio lists.
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(&q->queue_lock) __acquires(&q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(&q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(&q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}

void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);