/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of trivially
 * observable cost metric.  This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution.
 *
 * Instead, this controller models the cost of each IO with a linear model
 * (see calc_lcoefs()) and distributes the device's capacity in terms of
 * virtual device time - vtime.  The device is assumed to emit
 * VTIME_PER_SEC worth of vtime per second, scaled by the current vrate,
 * and each active cgroup owns the fraction of it matching its hierarchical
 * weight.  An IO is issued when the owning cgroup's vtime is within
 * budget; otherwise the issuer either sleeps on iocg->waitq or, when
 * blocking could cause a priority inversion, proceeds immediately while
 * charging the cost as debt.
 *
 * The period timer, ioc_timer_fn(), adjusts vrate up or down according to
 * the io.cost.qos latency targets, transfers surplus budget from
 * under-utilizing cgroups to the ones which need more (work-conservation,
 * see transfer_surpluses()) and forgives debts when the device has been
 * sufficiently idle (see ioc_forgive_debts()).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include <asm/local.h>
#include <asm/local64.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-wbt.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */

enum {
	MILLION			= 1000000,

	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD		= USEC_PER_MSEC,
	MAX_PERIOD		= USEC_PER_SEC,

	/*
	 * iocg->vtime is targeted at 50% behind the device vtime, which
	 * serves as its IO credit buffer.  Surplus weight adjustment is
	 * immediately canceled if the vtime margin runs below 10%.
	 */
	MARGIN_MIN_PCT		= 10,
	MARGIN_LOW_PCT		= 20,
	MARGIN_TARGET_PCT	= 50,

	INUSE_ADJ_STEP_PCT	= 25,

	/* Have some play in timer operations */
	TIMER_SLACK_PCT		= 1,

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	WEIGHT_ONE		= 1 << 16,

	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision.  For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy.  At the same time, it should be able to
	 * represent reasonably long enough durations to be useful.
	 *
	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
	 */
	VTIME_PER_SEC_SHIFT	= 37,
	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,

	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM		= 10000,
	VRATE_MAX_PPM		= 100000000,

	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

	/* unbusy hysterisis */
	UNBUSY_THR_PCT		= 75,

	/*
	 * The effect of delay is indirect and non-linear and a huge amount of
	 * future debt can accumulate abruptly while unthrottled.  Linearly
	 * scale up delay as debt is going up and then let it decay
	 * exponentially.  This gives us quick ramp ups while delay is
	 * accumulating and long tails which can help reducing the frequency
	 * of debt explosions on unthrottle.  The parameters are
	 * experimentally determined.
	 */
	MIN_DELAY_THR_PCT	= 500,
	MAX_DELAY_THR_PCT	= 25000,
	MIN_DELAY		= 250,
	MAX_DELAY		= 250 * USEC_PER_MSEC,

	/* halve debts if the average usage stays below 50% over 100ms windows */
	DFGV_USAGE_PCT		= 50,
	DFGV_PERIOD		= 100 * USEC_PER_MSEC,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS	= 10,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/*
	 * Count IO size in 4k pages.  The 12bit shift helps keeping
	 * size-proportional components of cost calculation in closer
	 * numbers of digits to per-IO cost components.
	 */
	IOC_PAGE_SHIFT		= 12,
	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES	= 4096,
};
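
/*
 * A quick sanity check on the fixed-point vtime units defined above: one
 * second of device time is 2^37 = 137,438,953,472 vtime units, so
 * VTIME_PER_USEC is ~137,438 and VTIME_PER_NSEC is ~137.  An IO with an
 * absolute cost of ~137,438 thus represents roughly 1us worth of device
 * time at 100% vrate.
 */
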
312
313enum ioc_running {
314 IOC_IDLE,
315 IOC_RUNNING,
316 IOC_STOP,
317};
318
319
320enum {
321 QOS_ENABLE,
322 QOS_CTRL,
323 NR_QOS_CTRL_PARAMS,
324};
325
326
327enum {
328 QOS_RPPM,
329 QOS_RLAT,
330 QOS_WPPM,
331 QOS_WLAT,
332 QOS_MIN,
333 QOS_MAX,
334 NR_QOS_PARAMS,
335};
336
337
338enum {
339 COST_CTRL,
340 COST_MODEL,
341 NR_COST_CTRL_PARAMS,
342};
343
344
345enum {
346 I_LCOEF_RBPS,
347 I_LCOEF_RSEQIOPS,
348 I_LCOEF_RRANDIOPS,
349 I_LCOEF_WBPS,
350 I_LCOEF_WSEQIOPS,
351 I_LCOEF_WRANDIOPS,
352 NR_I_LCOEFS,
353};
354
355enum {
356 LCOEF_RPAGE,
357 LCOEF_RSEQIO,
358 LCOEF_RRANDIO,
359 LCOEF_WPAGE,
360 LCOEF_WSEQIO,
361 LCOEF_WRANDIO,
362 NR_LCOEFS,
363};
364
365enum {
366 AUTOP_INVALID,
367 AUTOP_HDD,
368 AUTOP_SSD_QD1,
369 AUTOP_SSD_DFL,
370 AUTOP_SSD_FAST,
371};

struct ioc_gq;

struct ioc_params {
	u32				qos[NR_QOS_PARAMS];
	u64				i_lcoefs[NR_I_LCOEFS];
	u64				lcoefs[NR_LCOEFS];
	u32				too_fast_vrate_pct;
	u32				too_slow_vrate_pct;
};

struct ioc_margins {
	s64				min;
	s64				low;
	s64				target;
};

struct ioc_missed {
	local_t				nr_met;
	local_t				nr_missed;
	u32				last_met;
	u32				last_missed;
};

struct ioc_pcpu_stat {
	struct ioc_missed		missed[2];

	local64_t			rq_wait_ns;
	u64				last_rq_wait_ns;
};

/* per device */
struct ioc {
	struct rq_qos			rqos;

	bool				enabled;

	struct ioc_params		params;
	struct ioc_margins		margins;
	u32				period_us;
	u32				timer_slack_ns;
	u64				vrate_min;
	u64				vrate_max;

	spinlock_t			lock;
	struct timer_list		timer;
	struct list_head		active_iocgs;	/* active cgroups */
	struct ioc_pcpu_stat __percpu	*pcpu_stat;

	enum ioc_running		running;
	atomic64_t			vtime_rate;
	u64				vtime_base_rate;
	s64				vtime_err;

	seqcount_spinlock_t		period_seqcount;
	u64				period_at;	/* wallclock starttime */
	u64				period_at_vtime; /* vtime starttime */

	atomic64_t			cur_period;	/* inc'd each period */
	int				busy_level;	/* saturation history */

	bool				weights_updated;
	atomic_t			hweight_gen;	/* for lazy hweights */

	/* debt forgiveness */
	u64				dfgv_period_at;
	u64				dfgv_period_rem;
	u64				dfgv_usage_us_sum;

	u64				autop_too_fast_at;
	u64				autop_too_slow_at;
	int				autop_idx;
	bool				user_qos_params:1;
	bool				user_cost_model:1;
};

struct iocg_pcpu_stat {
	local64_t			abs_vusage;
};

struct iocg_stat {
	u64				usage_us;
	u64				wait_us;
	u64				indebt_us;
	u64				indelay_us;
};

/* per device-cgroup pair */
struct ioc_gq {
	struct blkg_policy_data		pd;
	struct ioc			*ioc;

	/*
	 * A iocg can get its weight from two sources - an explicit
	 * per-device-weight configuration and the default weight applied to
	 * devices without explicit configuration.  The following two fields
	 * are used to track these.  See weight_updated().
	 *
	 * `active` is the effective weight among the currently active
	 * siblings.  `inuse` is the portion of `active` which is backing
	 * issued IOs; the rest is donated to needier siblings through the
	 * surplus transfer mechanism.
	 *
	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
	 * surplus adjustments and `saved_margin` tracks the margin used
	 * for in-period inuse adjustments.
	 */
	u32				cfg_weight;
	u32				weight;
	u32				active;
	u32				inuse;

	u32				last_inuse;
	s64				saved_margin;

	sector_t			cursor;		/* to detect randio */

	/*
	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
	 * issued.  If lagging behind device vtime, the delta represents
	 * the currently available IO budget.  If running ahead, the
	 * overage.
	 *
	 * `done_vtime` is the same but progressed on completion rather than
	 * issue.  The delta behind `vtime` represents the cost of
	 * currently in-flight IOs.
	 */
	atomic64_t			vtime;
	atomic64_t			done_vtime;
	u64				abs_vdebt;

	/* current delay in effect and when it started */
	u64				delay;
	u64				delay_at;

	/*
	 * The period this iocg was last active in.  Used for deactivation
	 * and invalidating `vtime`.
	 */
	atomic64_t			active_period;
	struct list_head		active_list;

	/* see __propagate_weights() and current_hweight() for details */
	u64				child_active_sum;
	u64				child_inuse_sum;
	u64				child_adjusted_sum;
	int				hweight_gen;
	u32				hweight_active;
	u32				hweight_inuse;
	u32				hweight_donating;
	u32				hweight_after_donation;

	struct list_head		walk_list;
	struct list_head		surplus_list;

	struct wait_queue_head		waitq;
	struct hrtimer			waitq_timer;

	/* timestamp at the latest activation */
	u64				activated_at;

	/* statistics */
	struct iocg_pcpu_stat __percpu	*pcpu_stat;
	struct iocg_stat		local_stat;
	struct iocg_stat		desc_stat;
	struct iocg_stat		last_stat;
	u64				last_stat_abs_vusage;
	u64				usage_delta_us;
	u64				wait_since;
	u64				indebt_since;
	u64				indelay_since;

	/* this iocg's depth in the hierarchy and ancestors including self */
	int				level;
	struct ioc_gq			*ancestors[];
};

/* per cgroup */
struct ioc_cgrp {
	struct blkcg_policy_data	cpd;
	unsigned int			dfl_weight;
};

struct ioc_now {
	u64				now_ns;
	u64				now;
	u64				vnow;
	u64				vrate;
};

struct iocg_wait {
	struct wait_queue_entry		wait;
	struct bio			*bio;
	u64				abs_cost;
	bool				committed;
};

struct iocg_wake_ctx {
	struct ioc_gq			*iocg;
	u32				hw_inuse;
	s64				vbudget;
};

static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos				= {
			[QOS_RLAT]		=        250000,
			[QOS_WLAT]		=        250000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     174019176,
			[I_LCOEF_RSEQIOPS]	=         41708,
			[I_LCOEF_RRANDIOPS]	=           370,
			[I_LCOEF_WBPS]		=     178075866,
			[I_LCOEF_WSEQIOPS]	=         42705,
			[I_LCOEF_WRANDIOPS]	=           378,
		},
	},
	[AUTOP_SSD_QD1] = {
		.qos				= {
			[QOS_RLAT]		=         25000,
			[QOS_WLAT]		=         25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     245855193,
			[I_LCOEF_RSEQIOPS]	=         61575,
			[I_LCOEF_RRANDIOPS]	=          6946,
			[I_LCOEF_WBPS]		=     141365009,
			[I_LCOEF_WSEQIOPS]	=         33716,
			[I_LCOEF_WRANDIOPS]	=         26796,
		},
	},
	[AUTOP_SSD_DFL] = {
		.qos				= {
			[QOS_RLAT]		=         25000,
			[QOS_WLAT]		=         25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     488636629,
			[I_LCOEF_RSEQIOPS]	=          8932,
			[I_LCOEF_RRANDIOPS]	=          8518,
			[I_LCOEF_WBPS]		=     427891549,
			[I_LCOEF_WSEQIOPS]	=         28755,
			[I_LCOEF_WRANDIOPS]	=         21940,
		},
		.too_fast_vrate_pct		=           500,
	},
	[AUTOP_SSD_FAST] = {
		.qos				= {
			[QOS_RLAT]		=          5000,
			[QOS_WLAT]		=          5000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=    3102524156LLU,
			[I_LCOEF_RSEQIOPS]	=        724816,
			[I_LCOEF_RRANDIOPS]	=        778122,
			[I_LCOEF_WBPS]		=    1742780862LLU,
			[I_LCOEF_WSEQIOPS]	=        425702,
			[I_LCOEF_WRANDIOPS]	=        443193,
		},
		.too_slow_vrate_pct		=            10,
	},
};

/*
 * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
 * vtime credit shortage and down on device saturation.
 */
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
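
/*
 * For example, at |busy_level| == 10 the table above yields 1%: the period
 * timer multiplies vrate by 99% when saturated or by 101% when short on
 * credit, ramping up to 16% steps as |busy_level| approaches the table end.
 */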

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

static struct ioc *q_to_ioc(struct request_queue *q)
{
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}

static const char *q_name(struct request_queue *q)
{
	if (blk_queue_registered(q))
		return kobject_name(q->kobj.parent);
	else
		return "<unknown>";
}

static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
	return q_name(ioc->rqos.q);
}

static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
}

static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
{
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
}

static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
{
	return pd_to_blkg(&iocg->pd);
}

static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
{
	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
			    struct ioc_cgrp, cpd);
}

/*
 * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
 * weight, the more expensive each IO.  Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
}

/*
 * The inverse of abs_cost_to_cost().  Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
}
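
/*
 * For example, an iocg whose hierarchical inuse weight is 25%
 * (hw_inuse == WEIGHT_ONE / 4) sees each IO at 4x its absolute cost -
 * it owns a quarter of the device, so its private vtime must advance four
 * times as fast as device vtime for the same amount of work.
 */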

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
			    u64 abs_cost, u64 cost)
{
	struct iocg_pcpu_stat *gcs;

	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
{
	if (lock_ioc) {
		spin_lock_irqsave(&iocg->ioc->lock, *flags);
		spin_lock(&iocg->waitq.lock);
	} else {
		spin_lock_irqsave(&iocg->waitq.lock, *flags);
	}
}

static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
{
	if (unlock_ioc) {
		spin_unlock(&iocg->waitq.lock);
		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
	} else {
		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
	}
}

#define CREATE_TRACE_POINTS
#include <trace/events/iocost.h>

static void ioc_refresh_margins(struct ioc *ioc)
{
	struct ioc_margins *margins = &ioc->margins;
	u32 period_us = ioc->period_us;
	u64 vrate = ioc->vtime_base_rate;

	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
}

/* latency Qos params changed, update period_us and all the dependent params */
static void ioc_refresh_period_us(struct ioc *ioc)
{
	u32 ppm, lat, multi, period_us;

	lockdep_assert_held(&ioc->lock);

	/* pick the higher latency target */
	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
		ppm = ioc->params.qos[QOS_RPPM];
		lat = ioc->params.qos[QOS_RLAT];
	} else {
		ppm = ioc->params.qos[QOS_WPPM];
		lat = ioc->params.qos[QOS_WLAT];
	}

	/*
	 * We want the period to be long enough to contain a healthy number
	 * of IOs while short enough for granular control.  Define it as a
	 * multiple of the latency target.  Ideally, the multiplier should
	 * be scaled according to the percentile so that it would nominally
	 * contain a certain number of requests.  Let's be simpler and
	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
	 */
	if (ppm)
		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
	else
		multi = 2;
	period_us = multi * lat;
	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);

	/* calculate dependent params */
	ioc->period_us = period_us;
	ioc->timer_slack_ns = div64_u64(
		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
		100);
	ioc_refresh_margins(ioc);
}
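
/*
 * Worked example: with a 25ms latency target at pct(95) - i.e. ppm=950000 -
 * the raw multiplier (1000000 - 950000) / 50000 = 1 is floored to 2 and the
 * period becomes 50ms.  At pct(50) the multiplier grows to 10, giving a
 * 250ms period, which then gets clamped into [MIN_PERIOD, MAX_PERIOD].
 */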

static int ioc_autop_idx(struct ioc *ioc)
{
	int idx = ioc->autop_idx;
	const struct ioc_params *p = &autop[idx];
	u32 vrate_pct;
	u64 now_ns;

	/* rotational? */
	if (!blk_queue_nonrot(ioc->rqos.q))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
	if (blk_queue_depth(ioc->rqos.q) == 1)
		return AUTOP_SSD_QD1;

	/* use one of the normal ssd sets */
	if (idx < AUTOP_SSD_DFL)
		return AUTOP_SSD_DFL;

	/* if user is overriding anything, maintain what was there */
	if (ioc->user_qos_params || ioc->user_cost_model)
		return idx;

	/* step up/down based on the vrate */
	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
	now_ns = ktime_get_ns();

	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
		if (!ioc->autop_too_fast_at)
			ioc->autop_too_fast_at = now_ns;
		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
			return idx + 1;
	} else {
		ioc->autop_too_fast_at = 0;
	}

	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
		if (!ioc->autop_too_slow_at)
			ioc->autop_too_slow_at = now_ns;
		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
			return idx - 1;
	} else {
		ioc->autop_too_slow_at = 0;
	}

	return idx;
}

/*
 * Take the followings as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
			u64 *page, u64 *seqio, u64 *randio)
{
	u64 v;

	*page = *seqio = *randio = 0;

	if (bps)
		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));

	if (seqiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
		if (v > *page)
			*seqio = v - *page;
	}

	if (randiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
		if (v > *page)
			*randio = v - *page;
	}
}
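
/*
 * For example, a device that streams 4.096 GB/s (one million 4k pages per
 * second) gets a per-page cost of VTIME_PER_SEC / 1000000 ~= 137,439 vtime
 * units.  If it also does 500k sequential 4k IOPS, the per-IO budget is
 * ~274,878, so the seq IO base cost is the remainder, ~137,439 - issuing
 * flat out in either pattern consumes vtime at exactly device speed.
 */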

static void ioc_refresh_lcoefs(struct ioc *ioc)
{
	u64 *u = ioc->params.i_lcoefs;
	u64 *c = ioc->params.lcoefs;

	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
}

static bool ioc_refresh_params(struct ioc *ioc, bool force)
{
	const struct ioc_params *p;
	int idx;

	lockdep_assert_held(&ioc->lock);

	idx = ioc_autop_idx(ioc);
	p = &autop[idx];

	if (idx == ioc->autop_idx && !force)
		return false;

	if (idx != ioc->autop_idx)
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);

	ioc->autop_idx = idx;
	ioc->autop_too_fast_at = 0;
	ioc->autop_too_slow_at = 0;

	if (!ioc->user_qos_params)
		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
	if (!ioc->user_cost_model)
		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));

	ioc_refresh_period_us(ioc);
	ioc_refresh_lcoefs(ioc);

	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
					    VTIME_PER_USEC, MILLION);
	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
				   VTIME_PER_USEC, MILLION);

	return true;
}

/*
 * When an iocg accumulates too much vtime or gets deactivated, we throw away
 * some vtime, which lowers the overall device utilization.  As the exact
 * amount which is being thrown away is known, we can compensate by
 * accelerating the vrate accordingly so that the extra vtime generated in
 * the current period matches what got lost.
 */
static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
{
	s64 pleft = ioc->period_at + ioc->period_us - now->now;
	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
	s64 vcomp, vcomp_min, vcomp_max;

	lockdep_assert_held(&ioc->lock);

	/* we need some time left in this period */
	if (pleft <= 0)
		goto done;

	/*
	 * Calculate how much vrate should be adjusted to offset the error.
	 * Limit the amount of adjustment and deduct the adjusted amount from
	 * the error.
	 */
	vcomp = -div64_s64(ioc->vtime_err, pleft);
	vcomp_min = -(ioc->vtime_base_rate >> 1);
	vcomp_max = ioc->vtime_base_rate;
	vcomp = clamp(vcomp, vcomp_min, vcomp_max);

	ioc->vtime_err += vcomp * pleft;

	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
done:
	/* bound how much error can accumulate */
	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}

/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
	unsigned seq;

	now->now_ns = ktime_get();
	now->now = ktime_to_us(now->now_ns);
	now->vrate = atomic64_read(&ioc->vtime_rate);

	/*
	 * The current vtime is
	 *
	 *   vtime at period start + (wallclock time since the start) * vrate
	 *
	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
	 * needed, they're seqcount protected.
	 */
	do {
		seq = read_seqcount_begin(&ioc->period_seqcount);
		now->vnow = ioc->period_at_vtime +
			(now->now - ioc->period_at) * now->vrate;
	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
}

static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);

	write_seqcount_begin(&ioc->period_seqcount);
	ioc->period_at = now->now;
	ioc->period_at_vtime = now->vnow;
	write_seqcount_end(&ioc->period_seqcount);

	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
}

/*
 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
 * weight sums and propagate upwards accordingly.  If @save, the current
 * margin is saved to be used as reference for later inuse in-period
 * adjustments.
 */
static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
				bool save, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;

	lockdep_assert_held(&ioc->lock);

	inuse = clamp_t(u32, inuse, 1, active);

	iocg->last_inuse = iocg->inuse;
	if (save)
		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);

	if (active == iocg->active && inuse == iocg->inuse)
		return;

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 parent_active = 0, parent_inuse = 0;

		/* update the level sums */
		parent->child_active_sum += (s32)(active - child->active);
		parent->child_inuse_sum += (s32)(inuse - child->inuse);
		/* apply the updates */
		child->active = active;
		child->inuse = inuse;

		/*
		 * The delta between inuse and active sums indicates that
		 * much of weight is being given away.  Parent's inuse
		 * and active should reflect the ratio.
		 */
		if (parent->child_active_sum) {
			parent_active = parent->weight;
			parent_inuse = DIV64_U64_ROUND_UP(
				parent_active * parent->child_inuse_sum,
				parent->child_active_sum);
		}

		/* do we need to keep walking up? */
		if (parent_active == parent->active &&
		    parent_inuse == parent->inuse)
			break;

		active = parent_active;
		inuse = parent_inuse;
	}

	ioc->weights_updated = true;
}

static void commit_weights(struct ioc *ioc)
{
	lockdep_assert_held(&ioc->lock);

	if (ioc->weights_updated) {
		/* paired with rmb in current_hweight(), see below */
		smp_wmb();
		atomic_inc(&ioc->hweight_gen);
		ioc->weights_updated = false;
	}
}

static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
			      bool save, struct ioc_now *now)
{
	__propagate_weights(iocg, active, inuse, save, now);
	commit_weights(iocg->ioc);
}

static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;
	u32 hwa, hwi;
	int ioc_gen;

	/* hot path - if uptodate, use cached */
	ioc_gen = atomic_read(&ioc->hweight_gen);
	if (ioc_gen == iocg->hweight_gen)
		goto out;

	/*
	 * Paired with wmb in commit_weights().  If we saw the updated
	 * hweight_gen, all the weight updates from __propagate_weights() are
	 * visible too.
	 *
	 * We can race with weight updates during calculation and get it
	 * wrong.  However, hweight_gen would have changed and a future
	 * reader will recalculate and we're guaranteed to discard the
	 * wrong result soon.
	 */
	smp_rmb();

	hwa = hwi = WEIGHT_ONE;
	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u64 active_sum = READ_ONCE(parent->child_active_sum);
		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
		u32 active = READ_ONCE(child->active);
		u32 inuse = READ_ONCE(child->inuse);

		/* we can race with deactivations and either may read as zero */
		if (!active_sum || !inuse_sum)
			continue;

		active_sum = max_t(u64, active, active_sum);
		hwa = div64_u64((u64)hwa * active, active_sum);

		inuse_sum = max_t(u64, inuse, inuse_sum);
		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
	}

	iocg->hweight_active = max_t(u32, hwa, 1);
	iocg->hweight_inuse = max_t(u32, hwi, 1);
	iocg->hweight_gen = ioc_gen;
out:
	if (hw_activep)
		*hw_activep = iocg->hweight_active;
	if (hw_inusep)
		*hw_inusep = iocg->hweight_inuse;
}
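
/*
 * Hierarchical weight example: two active siblings under the root with
 * weights 100 and 300 have a child_active_sum of 400, so their
 * hweight_active's come out to WEIGHT_ONE * 100/400 (25%) and
 * WEIGHT_ONE * 300/400 (75%).  Nesting multiplies the per-level fractions
 * together down the ancestry.
 */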

/*
 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all
 * the other weights stay unchanged.
 */
static u32 current_hweight_max(struct ioc_gq *iocg)
{
	u32 hwm = WEIGHT_ONE;
	u32 inuse = iocg->active;
	u64 child_inuse_sum;
	int lvl;

	lockdep_assert_held(&iocg->ioc->lock);

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];

		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
					   parent->child_active_sum);
	}

	return max_t(u32, hwm, 1);
}

static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
	u32 weight;

	lockdep_assert_held(&ioc->lock);

	weight = iocg->cfg_weight ?: iocc->dfl_weight;
	if (weight != iocg->weight && iocg->active)
		propagate_weights(iocg, weight, iocg->inuse, true, now);
	iocg->weight = weight;
}

static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 last_period, cur_period;
	u64 vtime, vtarget;
	int i;

	/*
	 * If seem to be already active, just update the stamp to tell the
	 * timer that we're still active.  We don't mind occassional races.
	 */
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
		return true;
	}

	/* racy check on internal node IOs, treat as root level IOs */
	if (iocg->child_active_sum)
		return false;

	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, now);

	/* update period */
	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	if (!list_empty(&iocg->active_list))
		goto succeed_unlock;
	for (i = iocg->level - 1; i > 0; i--)
		if (!list_empty(&iocg->ancestors[i]->active_list))
			goto fail_unlock;

	if (iocg->child_active_sum)
		goto fail_unlock;

	/*
	 * Always start with the target budget.  On deactivation, we throw
	 * away anything above it.
	 */
	vtarget = now->vnow - ioc->margins.target;
	vtime = atomic64_read(&iocg->vtime);

	atomic64_add(vtarget - vtime, &iocg->vtime);
	atomic64_add(vtarget - vtime, &iocg->done_vtime);
	vtime = vtarget;

	/*
	 * Activate, propagate weight and start period timer if not running.
	 * Reset hweight_gen to avoid accidental match from wrapping.
	 */
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);

	propagate_weights(iocg, iocg->weight,
			  iocg->last_inuse ?: iocg->weight, true, now);

	TRACE_IOCG_PATH(iocg_activate, iocg, now,
			last_period, cur_period, vtime);

	iocg->activated_at = now->now;

	if (ioc->running == IOC_IDLE) {
		ioc->running = IOC_RUNNING;
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc_start_period(ioc, now);
	}

succeed_unlock:
	spin_unlock_irq(&ioc->lock);
	return true;

fail_unlock:
	spin_unlock_irq(&ioc->lock);
	return false;
}

static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	u64 tdelta, delay, new_delay;
	s64 vover, vover_pct;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	/* calculate the current delay in effect - 1/2 every second */
	tdelta = now->now - iocg->delay_at;
	if (iocg->delay)
		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
	else
		delay = 0;

	/* calculate the new delay from the debt amount */
	current_hweight(iocg, &hwa, NULL);
	vover = atomic64_read(&iocg->vtime) +
		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
	vover_pct = div64_s64(100 * vover,
			      ioc->period_us * ioc->vtime_base_rate);

	if (vover_pct <= MIN_DELAY_THR_PCT)
		new_delay = 0;
	else if (vover_pct >= MAX_DELAY_THR_PCT)
		new_delay = MAX_DELAY;
	else
		new_delay = MIN_DELAY +
			div_u64((MAX_DELAY - MIN_DELAY) *
				(vover_pct - MIN_DELAY_THR_PCT),
				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);

	/* pick the higher one and apply */
	if (new_delay > delay) {
		iocg->delay = new_delay;
		iocg->delay_at = now->now;
		delay = new_delay;
	}

	if (delay >= MIN_DELAY) {
		if (!iocg->indelay_since)
			iocg->indelay_since = now->now;
		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
		return true;
	} else {
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
			iocg->indelay_since = 0;
		}
		iocg->delay = 0;
		blkcg_clear_delay(blkg);
		return false;
	}
}
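
/*
 * The thresholds above map debt linearly onto delay: a debt overhang worth
 * 5 periods (MIN_DELAY_THR_PCT) begins delaying at 250us and one worth 250
 * periods (MAX_DELAY_THR_PCT) saturates at 250ms.  E.g. an overhang of
 * ~127.5 periods (vover_pct == 12750) lands halfway, at roughly 125ms.
 */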

static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
			    struct ioc_now *now)
{
	struct iocg_pcpu_stat *gcs;

	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);
	WARN_ON_ONCE(list_empty(&iocg->active_list));

	/*
	 * Once in debt, debt handling owns inuse.  @iocg stays at the minimum
	 * inuse donating all of it share to others until its debt is paid off.
	 */
	if (!iocg->abs_vdebt && abs_cost) {
		iocg->indebt_since = now->now;
		propagate_weights(iocg, iocg->active, 0, false, now);
	}

	iocg->abs_vdebt += abs_cost;

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
			  struct ioc_now *now)
{
	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);

	/* make sure that nobody messed with @iocg */
	WARN_ON_ONCE(list_empty(&iocg->active_list));
	WARN_ON_ONCE(iocg->inuse > 1);

	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);

	/* if debt is paid in full, restore inuse */
	if (!iocg->abs_vdebt) {
		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
		iocg->indebt_since = 0;

		propagate_weights(iocg, iocg->active, iocg->last_inuse,
				  false, now);
	}
}

static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			int flags, void *key)
{
	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

	ctx->vbudget -= cost;

	if (ctx->vbudget < 0)
		return -1;

	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);

	/*
	 * autoremove_wake_function() removes the wait entry only when it
	 * actually changed the task state.  We want the wait always removed.
	 * Remove explicitly and use default_wake_function().
	 */
	list_del_init(&wq_entry->entry);
	wait->committed = true;

	default_wake_function(wq_entry, mode, flags, key);
	return 0;
}

/*
 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
 * accordingly.  When @pay_debt is %true, the caller must be holding ioc->lock
 * in addition to waitq.lock.
 */
static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
			    struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_wake_ctx ctx = { .iocg = iocg };
	u64 vshortage, expires, oexpires;
	s64 vbudget;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	current_hweight(iocg, &hwa, NULL);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);

	/* pay off debt */
	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);

		lockdep_assert_held(&ioc->lock);

		atomic64_add(vpay, &iocg->vtime);
		atomic64_add(vpay, &iocg->done_vtime);
		iocg_pay_debt(iocg, abs_vpay, now);
		vbudget -= vpay;
	}

	if (iocg->abs_vdebt || iocg->delay)
		iocg_kick_delay(iocg, now);

	/*
	 * Debt can still be outstanding if we haven't paid all yet or the
	 * caller raced and called without @pay_debt.  Shouldn't wake up
	 * waiters under debt.  Make sure @vbudget reflects the outstanding
	 * amount and is not positive.
	 */
	if (iocg->abs_vdebt) {
		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
		vbudget = min_t(s64, 0, vbudget - vdebt);
	}

	/*
	 * Wake up the ones which are due and see how much vtime we'll need
	 * for the next one.  As paying off debt restores hw_inuse, it must
	 * be read after the above debt payment.
	 */
	ctx.vbudget = vbudget;
	current_hweight(iocg, NULL, &ctx.hw_inuse);

	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);

	if (!waitqueue_active(&iocg->waitq)) {
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = 0;
		}
		return;
	}

	if (!iocg->wait_since)
		iocg->wait_since = now->now;

	if (WARN_ON_ONCE(ctx.vbudget >= 0))
		return;

	/* determine next wakeup, add a timer margin to guarantee chunking */
	vshortage = -ctx.vbudget;
	expires = now->now_ns +
		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
		NSEC_PER_USEC;
	expires += ioc->timer_slack_ns;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
	    abs(oexpires - expires) <= ioc->timer_slack_ns)
		return;

	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
	struct ioc_now now;
	unsigned long flags;

	ioc_now(iocg->ioc, &now);

	iocg_lock(iocg, pay_debt, &flags);
	iocg_kick_waitq(iocg, pay_debt, &now);
	iocg_unlock(iocg, pay_debt, &flags);

	return HRTIMER_NORESTART;
}

static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
{
	u32 nr_met[2] = { };
	u32 nr_missed[2] = { };
	u64 rq_wait_ns = 0;
	int cpu, rw;

	for_each_online_cpu(cpu) {
		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
		u64 this_rq_wait_ns;

		for (rw = READ; rw <= WRITE; rw++) {
			u32 this_met = local_read(&stat->missed[rw].nr_met);
			u32 this_missed = local_read(&stat->missed[rw].nr_missed);

			nr_met[rw] += this_met - stat->missed[rw].last_met;
			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
			stat->missed[rw].last_met = this_met;
			stat->missed[rw].last_missed = this_missed;
		}

		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
		stat->last_rq_wait_ns = this_rq_wait_ns;
	}

	for (rw = READ; rw <= WRITE; rw++) {
		if (nr_met[rw] + nr_missed[rw])
			missed_ppm_ar[rw] =
				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
						   nr_met[rw] + nr_missed[rw]);
		else
			missed_ppm_ar[rw] = 0;
	}

	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
}

/* was iocg idle this period? */
static bool iocg_is_idle(struct ioc_gq *iocg)
{
	struct ioc *ioc = iocg->ioc;

	/* did something get issued this period? */
	if (atomic64_read(&iocg->active_period) ==
	    atomic64_read(&ioc->cur_period))
		return false;

	/* is something in flight? */
	if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
		return false;

	return true;
}

/*
 * Call this function on the target leaf @iocg's to build pre-order traversal
 * list of all the ancestors in @inner_walk.  The inner nodes are linked
 * through ->walk_list and the caller is responsible for dissolving the list
 * after use.
 */
static void iocg_build_inner_walk(struct ioc_gq *iocg,
				  struct list_head *inner_walk)
{
	int lvl;

	WARN_ON_ONCE(!list_empty(&iocg->walk_list));

	/* find the first ancestor which hasn't been visited yet */
	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		if (!list_empty(&iocg->ancestors[lvl]->walk_list))
			break;
	}

	/* walk down and visit the inner nodes to get pre-order traversal */
	while (++lvl <= iocg->level - 1) {
		struct ioc_gq *inner = iocg->ancestors[lvl];

		/* record traversal order */
		list_add_tail(&inner->walk_list, inner_walk);
	}
}

/* collect per-cpu counters and propagate the deltas to the parent */
static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_stat new_stat;
	u64 abs_vusage = 0;
	u64 vusage_delta;
	int cpu;

	lockdep_assert_held(&iocg->ioc->lock);

	/* collect per-cpu counters */
	for_each_possible_cpu(cpu) {
		abs_vusage += local64_read(
				per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
	}
	vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
	iocg->last_stat_abs_vusage = abs_vusage;

	iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
	iocg->local_stat.usage_us += iocg->usage_delta_us;

	/* sum up local and descendant stats */
	new_stat.usage_us =
		iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
	new_stat.wait_us =
		iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
	new_stat.indebt_us =
		iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
	new_stat.indelay_us =
		iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;

	/* propagate the deltas to the parent */
	if (iocg->level > 0) {
		struct iocg_stat *parent_stat =
			&iocg->ancestors[iocg->level - 1]->desc_stat;

		parent_stat->usage_us +=
			new_stat.usage_us - iocg->last_stat.usage_us;
		parent_stat->wait_us +=
			new_stat.wait_us - iocg->last_stat.wait_us;
		parent_stat->indebt_us +=
			new_stat.indebt_us - iocg->last_stat.indebt_us;
		parent_stat->indelay_us +=
			new_stat.indelay_us - iocg->last_stat.indelay_us;
	}

	iocg->last_stat = new_stat;
}

/* get stat counters ready for reading on all active iocgs */
static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
{
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg;

	/* flush leaves and build inner node walk list */
	list_for_each_entry(iocg, target_iocgs, active_list) {
		iocg_flush_stat_one(iocg, now);
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	/* keep flushing upwards by walking the inner list backwards */
	list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
		iocg_flush_stat_one(iocg, now);
		list_del_init(&iocg->walk_list);
	}
}

/*
 * Determine what @iocg's hweight_inuse should be after donating unused
 * capacity.  @hwm is the upper bound and used to signal no donation.  This
 * function also throws away @iocg's excess budget.
 */
static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
				  u32 usage, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 vtime = atomic64_read(&iocg->vtime);
	s64 excess, delta, target, new_hwi;

	/* debt handling owns inuse - keep donating everything */
	if (iocg->abs_vdebt)
		return 1;

	/* see whether minimum margin requirement is met */
	if (waitqueue_active(&iocg->waitq) ||
	    time_after64(vtime, now->vnow - ioc->margins.min))
		return hwm;

	/* throw away excess above target */
	excess = now->vnow - vtime - ioc->margins.target;
	if (excess > 0) {
		atomic64_add(excess, &iocg->vtime);
		atomic64_add(excess, &iocg->done_vtime);
		vtime += excess;
		ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
	}

	/*
	 * Let's say the distance between iocg's and device's vtimes as a
	 * fraction of period duration is delta.  Assuming that the iocg will
	 * consume the usage determined above, we want to determine new_hwi so
	 * that delta equals MARGIN_TARGET at the end of the next period.
	 *
	 * We need to execute usage worth of IOs while spending the sum of the
	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
	 * (delta):
	 *
	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
	 *
	 * Therefore, the new_hwi is:
	 *
	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
	 */
	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
			  now->vnow - ioc->period_at_vtime);
	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);

	return clamp_t(s64, new_hwi, 1, hwm);
}
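
/*
 * Plugging numbers into the formula above: an iocg which used 30% of the
 * device (usage == 0.3 * WEIGHT_ONE) and ended the period with its budget
 * fully consumed (delta ~= 0) gets new_hwi = 0.3 / (1 - 0.5) = 60% - enough
 * to do the same amount of work next period and still finish it with the
 * 50% margin in hand.
 */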

/*
 * For work-conservation, an iocg which isn't using all of its share should
 * donate the surplus to other iocgs.  Donations aren't made directly between
 * arbitrary cgroups; instead, each donating leaf shrinks its inuse and the
 * freed share is redistributed through the hierarchy.
 *
 * transfer_surpluses() implements this in three steps over the set of
 * surplus iocgs collected by the period timer:
 *
 * 1. The donation targets (hweight_after_donation) of the surplus leaves
 *    are accumulated bottom-up into their inner ancestor nodes.
 *
 * 2. A global adjustment rate, gamma, is derived from the root's donating
 *    and after-donation hweights.  gamma is the rate at which the
 *    non-donating portions of the tree get inflated by the donated budget.
 *
 * 3. Walking the inner nodes root-to-leaf, each node's hweight_inuse and
 *    child_adjusted_sum are recalculated and, finally, each surplus leaf's
 *    inuse is set so that its hierarchical inuse weight matches its
 *    donation target.
 *
 * All of this runs in low resolution fixed point arithmetic against
 * WEIGHT_ONE, so the intermediate values are clamped into valid ranges to
 * protect against rounding errors.
 */
static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
{
	LIST_HEAD(over_hwa);
	LIST_HEAD(inner_walk);
	struct ioc_gq *iocg, *tiocg, *root_iocg;
	u32 after_sum, over_sum, over_target, gamma;

	/*
	 * It's possible for the sum of hweight_after_donation's to be higher
	 * than WEIGHT_ONE, which will confuse the following calculations.
	 * If such condition is detected, scale down everyone over its full
	 * share equally to keep the sum below WEIGHT_ONE.
	 */
	after_sum = 0;
	over_sum = 0;
	list_for_each_entry(iocg, surpluses, surplus_list) {
		u32 hwa;

		current_hweight(iocg, &hwa, NULL);
		after_sum += iocg->hweight_after_donation;

		if (iocg->hweight_after_donation > hwa) {
			over_sum += iocg->hweight_after_donation;
			list_add(&iocg->walk_list, &over_hwa);
		}
	}

	if (after_sum >= WEIGHT_ONE) {
		/*
		 * The delta should be deducted from the over_sum, calculate
		 * target over_sum value.
		 */
		u32 over_delta = after_sum - (WEIGHT_ONE - 1);
		WARN_ON_ONCE(over_sum <= over_delta);
		over_target = over_sum - over_delta;
	} else {
		over_target = 0;
	}

	list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
		if (over_target)
			iocg->hweight_after_donation =
				div_u64((u64)iocg->hweight_after_donation *
					over_target, over_sum);
		list_del_init(&iocg->walk_list);
	}

	/*
	 * Build pre-order inner node walk list and prepare for donation
	 * adjustment calculations.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		iocg_build_inner_walk(iocg, &inner_walk);
	}

	root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
	WARN_ON_ONCE(root_iocg->level > 0);

	list_for_each_entry(iocg, &inner_walk, walk_list) {
		iocg->child_adjusted_sum = 0;
		iocg->hweight_donating = 0;
		iocg->hweight_after_donation = 0;
	}

	/*
	 * Propagate the donating budgets of the surplus leaves up the
	 * hierarchy - first from the leaves to their parents and then from
	 * the deeper inner nodes upwards.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

		parent->hweight_donating += iocg->hweight_donating;
		parent->hweight_after_donation += iocg->hweight_after_donation;
	}

	list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
		if (iocg->level > 0) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			parent->hweight_donating += iocg->hweight_donating;
			parent->hweight_after_donation += iocg->hweight_after_donation;
		}
	}

	/*
	 * Calculate inner hwa's (b) and make sure the donation values are
	 * within the accepted ranges as we're doing low resolution
	 * calculations.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		if (iocg->level) {
			struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];

			iocg->hweight_active = DIV64_U64_ROUND_UP(
				(u64)parent->hweight_active * iocg->active,
				parent->child_active_sum);
		}

		iocg->hweight_donating = min(iocg->hweight_donating,
					     iocg->hweight_active);
		iocg->hweight_after_donation = min(iocg->hweight_after_donation,
						   iocg->hweight_donating - 1);
		if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
				 iocg->hweight_donating <= 1 ||
				 iocg->hweight_after_donation == 0)) {
			pr_warn("iocg: invalid donation weights in ");
			pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
			pr_cont(": active=%u donating=%u after=%u\n",
				iocg->hweight_active, iocg->hweight_donating,
				iocg->hweight_after_donation);
		}
	}

	/*
	 * Calculate the global donation rate (gamma) - the rate to adjust
	 * non-donating budgets by.
	 *
	 * No one other than the root can have a hweight_inuse of WEIGHT_ONE,
	 * so the donating hweight is capped below it and the division is
	 * always safe:
	 *
	 *   gamma = (1 - t_r') / (1 - t_r)
	 *
	 * where t_r and t_r' are the root's donating hweight before and
	 * after donation respectively.
	 */
	gamma = DIV_ROUND_UP(
		(WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
		WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));

	/*
	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
	 * nodes.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		struct ioc_gq *parent;
		u32 inuse, wpt, wptp;
		u64 st, sf;

		if (iocg->level == 0) {
			/* adjusted root level sum: s' = s * (1 - t) / (1 - t') */
			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
				WEIGHT_ONE - iocg->hweight_after_donation);
			continue;
		}

		parent = iocg->ancestors[iocg->level - 1];

		/* b' = gamma * (b - t) + t' */
		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
			WEIGHT_ONE) + iocg->hweight_after_donation;

		/* w' = s' * b' / b'_p */
		inuse = DIV64_U64_ROUND_UP(
			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
			parent->hweight_inuse);

		/* adjusted sum: s' = s_f + s_t * w'_pt / w_pt */
		st = DIV64_U64_ROUND_UP(
			iocg->child_active_sum * iocg->hweight_donating,
			iocg->hweight_active);
		sf = iocg->child_active_sum - st;
		wpt = DIV64_U64_ROUND_UP(
			(u64)iocg->active * iocg->hweight_donating,
			iocg->hweight_active);
		wptp = DIV64_U64_ROUND_UP(
			(u64)inuse * iocg->hweight_after_donation,
			iocg->hweight_inuse);

		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
	}

	/*
	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum
	 * and we can finally determine leaf adjustments.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
		u32 inuse;

		/*
		 * In-debt iocgs participated in the donation calculation with
		 * the minimum target hweight_inuse.  Configuring inuse
		 * accordingly would work fine but debt handling expects
		 * @iocg->inuse stay at the minimum and we don't wanna
		 * interfere.
		 */
		if (iocg->abs_vdebt) {
			WARN_ON_ONCE(iocg->inuse > 1);
			continue;
		}

		/* translate the adjusted target hweight back into inuse */
		inuse = DIV64_U64_ROUND_UP(
			parent->child_adjusted_sum * iocg->hweight_after_donation,
			parent->hweight_inuse);

		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
				iocg->inuse, inuse,
				iocg->hweight_inuse,
				iocg->hweight_after_donation);

		__propagate_weights(iocg, iocg->active, inuse, true, now);
	}

	/* walk list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
		list_del_init(&iocg->walk_list);
}

/*
 * A low weight iocg can amass a large amount of debt, for example, when
 * anonymous memory gets reclaimed aggressively.  If the device has relatively
 * low capacity and/or the debtor's weight is low, the accumulated debt can
 * take a long time to pay off, leaving the cgroup unable to issue IOs for an
 * extended period.
 *
 * The debt and delay mechanisms exist to protect others from such
 * overindebted cgroups, not to punish them.  If the device has been
 * sufficiently idle - debtors' usage staying below DFGV_USAGE_PCT over
 * DFGV_PERIOD windows - nobody is being hurt by the outstanding debt, so
 * halve it (and the matching delay) once per elapsed window.
 */
static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
			      struct ioc_now *now)
{
	struct ioc_gq *iocg;
	u64 dur, usage_pct, nr_cycles;

	/* if no debtor, reset the cycle */
	if (!nr_debtors) {
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc->dfgv_usage_us_sum = 0;
		return;
	}

	/*
	 * Debtors can pass through a lot of writes choking the device and we
	 * don't want to be forgiving debts while the device is struggling from
	 * write bursts.  If we're missing latency targets, consider the device
	 * fully utilized.
	 */
	if (ioc->busy_level > 0)
		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);

	ioc->dfgv_usage_us_sum += usage_us_sum;
	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
		return;

	/*
	 * At least DFGV_PERIOD has passed since the last window.  Calculate
	 * the average usage and reset the window counters.
	 */
	dur = now->now - ioc->dfgv_period_at;
	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);

	ioc->dfgv_period_at = now->now;
	ioc->dfgv_usage_us_sum = 0;

	/* if was too busy, reset everything */
	if (usage_pct > DFGV_USAGE_PCT) {
		ioc->dfgv_period_rem = 0;
		return;
	}

	/*
	 * Usage is lower than threshold.  Let's forgive some debts.  Debt
	 * forgiveness runs off of the usual timer but the same timer may be
	 * delayed, so split the elapsed time into DFGV_PERIOD cycles, halve
	 * once per cycle and carry the remainder over to the next run.
	 */
	nr_cycles = dur + ioc->dfgv_period_rem;
	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);

	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 __maybe_unused old_debt, __maybe_unused old_delay;

		if (!iocg->abs_vdebt && !iocg->delay)
			continue;

		spin_lock(&iocg->waitq.lock);

		old_debt = iocg->abs_vdebt;
		old_delay = iocg->delay;

		if (iocg->abs_vdebt)
			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
		if (iocg->delay)
			iocg->delay = iocg->delay >> nr_cycles ?: 1;

		iocg_kick_waitq(iocg, true, now);

		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
				old_debt, iocg->abs_vdebt,
				old_delay, iocg->delay);

		spin_unlock(&iocg->waitq.lock);
	}
}
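
/*
 * Forgiveness example: if the timer was delayed and 350ms passed since the
 * window started, nr_cycles = 3 with 50ms carried over, so a debt of 8M
 * vtime units becomes 8M >> 3 = 1M - halved once for each elapsed 100ms
 * window, never dropping below 1 while debt remains.
 */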

static void ioc_timer_fn(struct timer_list *timer)
{
	struct ioc *ioc = container_of(timer, struct ioc, timer);
	struct ioc_gq *iocg, *tiocg;
	struct ioc_now now;
	LIST_HEAD(surpluses);
	int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
	u64 usage_us_sum = 0;
	u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	u32 missed_ppm[2], rq_wait_pct;
	u64 period_vtime;
	int prev_busy_level;

	/* how were the latencies during the period? */
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);

	/* take care of active iocgs */
	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, &now);

	period_vtime = now.vnow - ioc->period_at_vtime;
	if (WARN_ON_ONCE(!period_vtime)) {
		spin_unlock_irq(&ioc->lock);
		return;
	}

	/*
	 * Waiters determine the sleep durations based on the vrate they
	 * saw at the time of sleep.  If vrate has increased, some waiters
	 * could be sleeping for too long.  Wake up tardy waiters which
	 * should have woken up in the last period and expire idle iocgs.
	 */
	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
		    !iocg->delay && !iocg_is_idle(iocg))
			continue;

		spin_lock(&iocg->waitq.lock);

		/* flush wait and indebt stat deltas */
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now.now - iocg->wait_since;
			iocg->wait_since = now.now;
		}
		if (iocg->indebt_since) {
			iocg->local_stat.indebt_us +=
				now.now - iocg->indebt_since;
			iocg->indebt_since = now.now;
		}
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us +=
				now.now - iocg->indelay_since;
			iocg->indelay_since = now.now;
		}

		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
		    iocg->delay) {
			/* might be oversleeping vtime / hweight changes, kick */
			iocg_kick_waitq(iocg, true, &now);
			if (iocg->abs_vdebt || iocg->delay)
				nr_debtors++;
		} else if (iocg_is_idle(iocg)) {
			/* no waiter and idle, deactivate */
			u64 vtime = atomic64_read(&iocg->vtime);
			s64 excess;

			/*
			 * @iocg has been inactive for a full duration and will
			 * have a high budget.  Account anything above target
			 * as error and throw away.  On reactivation, it'll
			 * start with the target budget.
			 */
			excess = now.vnow - vtime - ioc->margins.target;
			if (excess > 0) {
				u32 old_hwi;

				current_hweight(iocg, NULL, &old_hwi);
				ioc->vtime_err -= div64_u64(excess * old_hwi,
							    WEIGHT_ONE);
			}

			__propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
		}

		spin_unlock(&iocg->waitq.lock);
	}
	commit_weights(ioc);

	/*
	 * Wait and indebt stat are flushed above and the donation calculation
	 * below needs updated usage stat.  Let's bring stat up-to-date.
	 */
	iocg_flush_stat(&ioc->active_iocgs, &now);

	/* calc usage and see whether some weights need to be moved around */
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 vdone, vtime, usage_us, usage_dur;
		u32 usage, hw_active, hw_inuse;

		/*
		 * Collect unused and wind vtime closer to vnow to prevent
		 * iocgs from accumulating a large amount of budget.
		 */
		vdone = atomic64_read(&iocg->done_vtime);
		vtime = atomic64_read(&iocg->vtime);
		current_hweight(iocg, &hw_active, &hw_inuse);

		/*
		 * Latency QoS detection doesn't account for IOs which are
		 * in-flight for longer than a period.  Detect them by
		 * comparing vdone against period start.  If lagging behind
		 * device vtime, the delta represents currently in-flight IOs.
		 */
		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		    time_after64(vtime, vdone) &&
		    time_after64(vtime, now.vnow -
				 MAX_LAGGING_PERIODS * period_vtime) &&
		    time_before64(vdone, now.vnow - period_vtime))
			nr_lagging++;

		/*
		 * Determine absolute usage factoring in in-flight IOs to avoid
		 * high-latency completions appearing as idle.
		 */
		usage_us = iocg->usage_delta_us;
		usage_us_sum += usage_us;

		if (vdone != vtime) {
			u64 inflight_us = DIV64_U64_ROUND_UP(
				cost_to_abs_cost(vtime - vdone, hw_inuse),
				ioc->vtime_base_rate);
			usage_us = max(usage_us, inflight_us);
		}

		/* convert to hweight based usage ratio */
		if (time_after64(iocg->activated_at, ioc->period_at))
			usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
		else
			usage_dur = max_t(u64, now.now - ioc->period_at, 1);

		usage = clamp_t(u32,
				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
						   usage_dur),
				1, WEIGHT_ONE);

		/* see whether there's surplus vtime */
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
		if (hw_inuse < hw_active ||
		    (!waitqueue_active(&iocg->waitq) &&
		     time_before64(vtime, now.vnow - ioc->margins.low))) {
			u32 hwa, old_hwi, hwm, new_hwi;

			/*
			 * Already donating or accumulated enough to start.
			 * Determine the donation amount.
			 */
			current_hweight(iocg, &hwa, &old_hwi);
			hwm = current_hweight_max(iocg);
			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
							 usage, &now);
			if (new_hwi < hwm) {
				iocg->hweight_donating = hwa;
				iocg->hweight_after_donation = new_hwi;
				list_add(&iocg->surplus_list, &surpluses);
			} else {
				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
						iocg->inuse, iocg->active,
						iocg->hweight_inuse, new_hwi);

				__propagate_weights(iocg, iocg->active,
						    iocg->active, true, &now);
				nr_shortages++;
			}
		} else {
			/* genuinely short on vtime */
			nr_shortages++;
		}
	}

	if (!list_empty(&surpluses) && nr_shortages)
		transfer_surpluses(&surpluses, &now);

	commit_weights(ioc);

	/* surplus list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
		list_del_init(&iocg->surplus_list);

	/*
	 * If q is getting clogged or we're missing too much, we're issuing
	 * too much IO and should lower vtime rate.  If we're not missing
	 * and experiencing shortages but not surpluses, we're too stingy
	 * and should increase vtime rate.
	 */
	prev_busy_level = ioc->busy_level;
	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
	    missed_ppm[READ] > ppm_rthr ||
	    missed_ppm[WRITE] > ppm_wthr) {
		/* clearly missing QoS targets, slow down vrate */
		ioc->busy_level = max(ioc->busy_level, 0);
		ioc->busy_level++;
	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
		/* QoS targets are being met with >25% margin */
		if (nr_shortages) {
			/*
			 * We're throttling while the device has spare
			 * capacity.  If vrate was being slowed down, stop.
			 */
			ioc->busy_level = min(ioc->busy_level, 0);

			/*
			 * If there are IOs spanning multiple periods, wait
			 * them out before pushing the device harder.
			 */
			if (!nr_lagging)
				ioc->busy_level--;
		} else {
			/*
			 * Nobody is being throttled and the users aren't
			 * issuing enough IOs to saturate the device.  We
			 * simply don't know how close the device is to
			 * saturation.  Coast.
			 */
			ioc->busy_level = 0;
		}
	} else {
		/* inside the hysterisis margin, we're good */
		ioc->busy_level = 0;
	}

	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);

	if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
		u64 vrate = ioc->vtime_base_rate;
		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;

		/* rq_wait signal is always reliable, ignore user vrate_min */
		if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
			vrate_min = VRATE_MIN;

		/*
		 * If vrate is out of bounds, apply clamp gradually as the
		 * bounds can change abruptly.  Otherwise, apply busy_level
		 * based adjustment.
		 */
		if (vrate < vrate_min) {
			vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
					  100);
			vrate = min(vrate, vrate_min);
		} else if (vrate > vrate_max) {
			vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
					  100);
			vrate = max(vrate, vrate_max);
		} else {
			int idx = min_t(int, abs(ioc->busy_level),
					ARRAY_SIZE(vrate_adj_pct) - 1);
			u32 adj_pct = vrate_adj_pct[idx];

			if (ioc->busy_level > 0)
				adj_pct = 100 - adj_pct;
			else
				adj_pct = 100 + adj_pct;

			vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
				      vrate_min, vrate_max);
		}

		trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
					   nr_lagging, nr_shortages);

		ioc->vtime_base_rate = vrate;
		ioc_refresh_margins(ioc);
	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
					   missed_ppm, rq_wait_pct, nr_lagging,
					   nr_shortages);
	}

	ioc_refresh_params(ioc, false);

	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);

	/*
	 * This period is done.  Move onto the next one.  If nothing's
	 * going on with the device, stop the timer.
	 */
	atomic64_inc(&ioc->cur_period);

	if (ioc->running != IOC_STOP) {
		if (!list_empty(&ioc->active_iocgs)) {
			ioc_start_period(ioc, &now);
		} else {
			ioc->busy_level = 0;
			ioc->vtime_err = 0;
			ioc->running = IOC_IDLE;
		}

		ioc_refresh_vrate(ioc, &now);
	}

	spin_unlock_irq(&ioc->lock);
}

static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
				      u64 abs_cost, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct ioc_margins *margins = &ioc->margins;
	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
	u32 hwi, adj_step;
	s64 margin;
	u64 cost, new_inuse;

	current_hweight(iocg, NULL, &hwi);
	old_hwi = hwi;
	cost = abs_cost_to_cost(abs_cost, hwi);
	margin = now->vnow - vtime - cost;

	/* debt handling owns inuse for debtors */
	if (iocg->abs_vdebt)
		return cost;

	/*
	 * We only increase inuse during period and do so if the margin has
	 * deteriorated since the previous adjustment.
	 */
	if (margin >= iocg->saved_margin || margin >= margins->low ||
	    iocg->inuse == iocg->active)
		return cost;

	spin_lock_irq(&ioc->lock);

	/* we own inuse only when @iocg is in the normal active state */
	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
		spin_unlock_irq(&ioc->lock);
		return cost;
	}

	/*
	 * Bump up inuse till @abs_cost fits in the existing budget.
	 * adj_step must be determined after acquiring ioc->lock - we might
	 * have raced and lost to another thread for activation and could
	 * be reading 0 iocg->active before ioc->lock which will lead to
	 * infinite loop.
	 */
	new_inuse = iocg->inuse;
	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
	do {
		new_inuse = new_inuse + adj_step;
		propagate_weights(iocg, iocg->active, new_inuse, true, now);
		current_hweight(iocg, NULL, &hwi);
		cost = abs_cost_to_cost(abs_cost, hwi);
	} while (time_after64(vtime + cost, now->vnow) &&
		 iocg->inuse != iocg->active);

	spin_unlock_irq(&ioc->lock);

	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
			old_inuse, iocg->inuse, old_hwi, hwi);

	return cost;
}
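
/*
 * With INUSE_ADJ_STEP_PCT == 25, each loop iteration above raises inuse by a
 * quarter of the active weight, so at most four iterations take a fully
 * donating iocg (inuse ~= 1) back to inuse == active while recomputing the
 * cost against the improved hierarchical weight at every step.
 */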

static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
				    bool is_merge, u64 *costp)
{
	struct ioc *ioc = iocg->ioc;
	u64 coef_seqio, coef_randio, coef_page;
	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
	u64 seek_pages = 0;
	u64 cost = 0;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		goto out;
	}

	if (iocg->cursor) {
		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
	}

	if (!is_merge) {
		if (seek_pages > LCOEF_RANDIO_PAGES)
			cost += coef_randio;
		else
			cost += coef_seqio;
	}

	cost += pages * coef_page;
out:
	*costp = cost;
}
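
/*
 * Example: a 64KiB read that continues right at iocg->cursor is 16 pages
 * with seek_pages == 0, so it's billed as one sequential IO:
 * cost = coef_seqio + 16 * coef_page.  The same read landing more than
 * 4096 pages (16MiB) away is billed coef_randio + 16 * coef_page instead.
 */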

static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
{
	u64 cost;

	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
	return cost;
}

static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
					 u64 *costp)
{
	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		*costp = 0;
	}
}

static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
{
	u64 cost;

	calc_size_vtime_cost_builtin(rq, ioc, &cost);
	return cost;
}

static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_gq *iocg = blkg_to_iocg(blkg);
	struct ioc_now now;
	struct iocg_wait wait;
	u64 abs_cost, cost, vtime;
	bool use_debt, ioc_locked;
	unsigned long flags;

	/* bypass IOs if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	/* calculate the absolute vtime cost */
	abs_cost = calc_vtime_cost(bio, iocg, false);
	if (!abs_cost)
		return;

	if (!iocg_activate(iocg, &now))
		return;

	iocg->cursor = bio_end_sector(bio);
	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);

	/*
	 * If no one's waiting and within budget, issue right away.  The
	 * tests are racy but the races aren't systemic - we only miss once
	 * in a while which is fine.
	 */
	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
	    time_before_eq64(vtime + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * We're over budget.  This can be handled in two ways.  IOs which may
	 * cause priority inversions - those issued as the root blkg or with a
	 * fatal signal pending - are charged as debt and issued immediately.
	 * Otherwise, the issuer is blocked on @iocg->waitq.  Debt handling
	 * requires @ioc->lock, waitq handling @iocg->waitq.lock.  Determine
	 * which lock to use.
	 */
	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
retry_lock:
	iocg_lock(iocg, ioc_locked, &flags);

	/*
	 * @iocg must stay activated for debt and waitq handling.  Deactivation
	 * is synchronized against both ioc->lock and waitq.lock and we won't
	 * get deactivated as long as we're waiting or has debt, so we're good
	 * if we're activated here.  In the unlikely cases that we aren't, just
	 * issue the IO.
	 */
	if (unlikely(list_empty(&iocg->active_list))) {
		iocg_unlock(iocg, ioc_locked, &flags);
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * If @bio must be issued regardless of cost, remember the abs_cost
	 * instead of advancing vtime.  iocg_kick_waitq() will pay off the
	 * debt before waking more IOs.
	 *
	 * This way, the debt is continuously paid off each period with the
	 * actual budget available to the cgroup.  If we just wound vtime,
	 * we would incorrectly use the current hw_inuse for the entire
	 * amount which, for example, can lead to the cgroup staying
	 * blocked for a long time even with substantially raised hw_inuse.
	 *
	 * An iocg with vdebt should stay online so that the timer can keep
	 * deducting its vdebt and [de]activate the use_delay mechanism
	 * accordingly.
	 */
	if (use_debt) {
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->q,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		iocg_unlock(iocg, ioc_locked, &flags);
		return;
	}

	/* guarantee that iocgs w/ waiters have maximum inuse */
	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
		if (!ioc_locked) {
			iocg_unlock(iocg, false, &flags);
			ioc_locked = true;
			goto retry_lock;
		}
		propagate_weights(iocg, iocg->active, iocg->active, true,
				  &now);
	}

	/*
	 * Append self to the waitq and schedule the wakeup timer if we're
	 * the first waiter.  The timer duration is calculated based on the
	 * current vrate.  vtime and hweight changes can make it too short
	 * or too long.  Each wait entry records the absolute cost it's
	 * waiting for to allow re-evaluation using a custom wait entry.
	 *
	 * If too short, the timer simply reschedules itself.  If too long,
	 * the period timer will notice and trigger wakeups.
	 *
	 * All waiters are on iocg->waitq and the wait states are
	 * synchronized using waitq.lock.
	 */
	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
	wait.wait.private = current;
	wait.bio = bio;
	wait.abs_cost = abs_cost;
	wait.committed = false;	/* will be set true by waker */

	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
	iocg_kick_waitq(iocg, ioc_locked, &now);

	iocg_unlock(iocg, ioc_locked, &flags);

	while (true) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (wait.committed)
			break;
		io_schedule();
	}

	/* waker already committed us, proceed */
	finish_wait(&iocg->waitq, &wait.wait);
}

static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = iocg->ioc;
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u64 vtime, abs_cost, cost;
	unsigned long flags;

	/* bypass if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);

	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request has
	 * cost assigned.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * Otherwise, account it as debt if @iocg is online, which it should
	 * be for the vast majority of cases.  See debt handling in
	 * ioc_rqos_throttle() for details.
	 */
	spin_lock_irqsave(&ioc->lock, flags);
	spin_lock(&iocg->waitq.lock);

	if (likely(!list_empty(&iocg->active_list))) {
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->q,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
	} else {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
	}

	spin_unlock(&iocg->waitq.lock);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
2710
2711static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2712{
2713 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2714
2715 if (iocg && bio->bi_iocost_cost)
2716 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2717}
static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_pcpu_stat *ccs;
	u64 on_q_ns, rq_wait_ns, size_nsec;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq) & REQ_OP_MASK) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);

	ccs = get_cpu_ptr(ioc->pcpu_stat);

	if (on_q_ns <= size_nsec ||
	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		local_inc(&ccs->missed[rw].nr_met);
	else
		local_inc(&ccs->missed[rw].nr_missed);

	local64_add(rq_wait_ns, &ccs->rq_wait_ns);

	put_cpu_ptr(ccs);
}
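
/* queue depth changes can shift device behavior, refresh the derived params */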
static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}

static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

static struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};
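
/*
 * Set up iocost for @q. Called lazily from the first "io.cost.qos" or
 * "io.cost.model" write against the queue.
 */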
static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int i, cpu, ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
			local_set(&ccs->missed[i].nr_met, 0);
			local_set(&ccs->missed[i].nr_missed, 0);
		}
		local64_set(&ccs->rq_wait_ns, 0);
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	ioc->vtime_base_rate = VTIME_PER_USEC;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}
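
/*
 * Per-(cgroup, queue) state. @ancestors[] is a flex array holding the
 * iocg at each cgroup level from the root down to this one, so a cgroup
 * at level 2 needs three slots, hence the struct_size() allocation with
 * level + 1 elements.
 */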
static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
	if (!iocg)
		return NULL;

	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
	if (!iocg->pcpu_stat) {
		kfree(iocg);
		return NULL;
	}

	return &iocg->pd;
}

static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	INIT_LIST_HEAD(&iocg->walk_list);
	INIT_LIST_HEAD(&iocg->surplus_list);
	iocg->hweight_active = WEIGHT_ONE;
	iocg->hweight_inuse = WEIGHT_ONE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);

		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg, &now);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
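
/*
 * Teardown counterpart of ioc_pd_init(). Deactivate under ioc->lock
 * first; the waitq timer is canceled only after the iocg is off the
 * active list.
 */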
static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);

		if (!list_empty(&iocg->active_list)) {
			struct ioc_now now;

			ioc_now(ioc, &now);
			propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
		}

		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));

		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
	}
	free_percpu(iocg->pcpu_stat);
	kfree(iocg);
}

static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	size_t pos = 0;

	if (!ioc->enabled)
		return 0;

	if (iocg->level == 0) {
		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
			ioc->vtime_base_rate * 10000, VTIME_PER_USEC);

		pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
				 vp10k / 100, vp10k % 100);
	}

	pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
			 iocg->last_stat.usage_us);

	if (blkcg_debug_stats)
		pos += scnprintf(buf + pos, size - pos,
				 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
				 iocg->last_stat.wait_us,
				 iocg->last_stat.indebt_us,
				 iocg->last_stat.indelay_us);

	return pos;
}
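
/*
 * "io.weight" interface. Reading shows the cgroup default followed by
 * any per-device overrides, e.g. (hypothetical device numbers):
 *
 *	default 100
 *	8:16 200
 *
 * Writes accept "default $WEIGHT", "$WEIGHT" or "$MAJ:$MIN $WEIGHT";
 * "$MAJ:$MIN default" clears an override. Weights are stored scaled by
 * WEIGHT_ONE (1 << 16) for fixed-point hierarchical calculations, so
 * writing 100 stores 100 * 65536.
 */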
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_now now;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		/* cgroup-wide default, "default $WEIGHT" or "$WEIGHT" */
		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock(&blkcg->lock);
		iocc->dfl_weight = v * WEIGHT_ONE;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock_irq(&iocg->ioc->lock);
				ioc_now(iocg->ioc, &now);
				weight_updated(iocg, &now);
				spin_unlock_irq(&iocg->ioc->lock);
			}
		}
		spin_unlock(&blkcg->lock);

		return nbytes;
	}

	/* per-device override, "$MAJ:$MIN $WEIGHT" */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;	/* clear the override */
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v * WEIGHT_ONE;
	ioc_now(iocg->ioc, &now);
	weight_updated(iocg, &now);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}

static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};
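
/*
 * Parse and apply an "io.cost.qos" write. A full line looks like, e.g.
 * (hypothetical device numbers):
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=5000
 *	min=50.00 max=150.00
 *
 * rpct/wpct and min/max are parsed as two-decimal fixed point via
 * cgroup_parse_float() and stored in parts-per-million (95.00% ->
 * 950000); setting any QoS parameter switches ctrl to "user".
 */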
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	/* initialize iocost for the queue on first configuration */
	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_stat_enable_accounting(ioc->rqos.q);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};
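
/*
 * Parse and apply an "io.cost.model" write, e.g. (hypothetical device
 * numbers and coefficients):
 *
 *	8:16 ctrl=user model=linear rbps=2000000000 rseqiops=100000
 *	rrandiops=80000 wbps=1000000000 wseqiops=90000 wrandiops=70000
 *
 * Setting any coefficient switches ctrl to "user"; "ctrl=auto" reverts
 * to the parameters selected from the autotuning table.
 */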
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			/* only the linear model is implemented */
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
	.pd_stat_fn	= ioc_pd_stat,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);