// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling, loosely based on CoDel. Since we can't
 * drop "packets" for IO scheduling, the logic works roughly like this:
 *
 * - Monitor minimum IO latencies over a sliding time window.
 * - If the minimum latency in that window exceeds our target, increment
 *   the scaling step and scale down the allowed write queue depth. The
 *   monitoring window is then shrunk to window / sqrt(scaling step + 1).
 * - For windows where we don't have solid latency data, cautiously move
 *   the scaling step back toward zero (the default depth).
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default write queue depth that the scaling logic starts from.
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec monitoring window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows without enough
	 * information to make a scaling decision, move the scale step
	 * back toward zero.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}
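
/*
 * Note the three-way split above: normal background writeback, kswapd
 * (memory reclaim) writeback and discards each get their own rq_wait
 * queue, so inflight accounting and wakeups in one class can't starve
 * the other classes.
 */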

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}
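
/*
 * A worked example of the batched wakeup above: with a limit of
 * wb_normal == 8 and wb_background == 4, a completion dropping inflight
 * to 5 gives diff == 3 >= wb_background / 2 == 2, so sleepers are woken.
 * A drop to 7 (diff == 1) wakes nobody, so wakeups are amortized over
 * batches rather than done once per completion.
 */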

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES write samples. We need the read sample
	 * because wbt throttles writes based on the read latencies it
	 * observes, so a window without reads tells us nothing.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,		/* read latency within target */
	LAT_UNKNOWN,		/* not enough samples to judge */
	LAT_UNKNOWN_WRITES,	/* no valid reads, but writes are active */
	LAT_EXCEEDED,		/* read latency exceeded the target */
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds the min
	 * latency target, we think it's a device flood, and increment the
	 * scale step.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}
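
/*
 * For example, with a scaled max_depth of 16 the above yields
 * wb_normal == (16 + 1) / 2 == 8 and wb_background == (16 + 3) / 4 == 4:
 * normal writeback may use half the current depth, background writeback
 * a quarter. At max_depth <= 2 that split degenerates, so wb_background
 * is pinned to 1 instead.
 */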

static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * When we're scaled down, shrink the monitoring window by
		 * a factor of 1 / sqrt(scale_step + 1) so we react more
		 * quickly. The shifts keep the division in fixed point:
		 * (win << 4) / int_sqrt((step + 1) << 8) equals
		 * win / sqrt(step + 1), since sqrt(x << 8) == sqrt(x) << 4.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * At the default depth, or scaled up, sample the full
		 * monitoring window.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
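
/*
 * E.g. at scale_step == 3, int_sqrt((3 + 1) << 8) == 32, so the window
 * becomes (100msec << 4) / 32 == 50msec; at scale_step == 15 it shrinks
 * to 25msec. The deeper we throttle, the fresher our latency samples.
 */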

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We don't have a valid read/write sample, but writes are
		 * going on. Allow the step to go negative, to increase
		 * write performance.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	wbt_update_limits(RQWB(rqos));
}

static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}
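
/*
 * Since HZ jiffies always span one second, HZ / 10 is 100msec at any
 * tick rate: IO counts as "close" if anything was issued or completed
 * within the last 100msec.
 */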

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}
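
/*
 * Summary of the selection above, assuming a scaled max_depth of 16
 * (so wb_normal == 8 and wb_background == 4):
 *
 *	discards				-> 4  (wb_background)
 *	REQ_SYNC/REQ_META/REQ_PRIO writes,
 *	kswapd, or a recent dirty-page wait	-> 16 (max_depth)
 *	REQ_BACKGROUND, or "close" IO		-> 4  (wb_background)
 *	everything else				-> 8  (wb_normal)
 */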

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.rw = rw,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}
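
/*
 * Net effect: only buffered/background writes and discards are ever
 * throttled. O_DIRECT style writes carry REQ_SYNC | REQ_IDLE and pass
 * through, and reads are merely timestamped in wbt_wait(), never blocked.
 */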

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);

	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Non-tracked IO
 * (reads, O_DIRECT style writes) only gets timestamped and passes
 * straight through.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);

	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows
	 * us to react quicker, if a sync IO takes a long time to complete.
	 * Note that this is just a hint. The request can go away when it
	 * completes, so it's important we never dereference it. We only use
	 * the address to compare with, which is why we store the sync_issue
	 * time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
	wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;

	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->wb_normal = 0;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
	return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%d\n", rwb->enable_state);
	return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;

	seq_printf(m, "%u\n", rqos->id);
	return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		seq_printf(m, "%d: inflight %d\n", i,
			   atomic_read(&rwb->rq_wait[i].inflight));
	return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	/* min_lat_nsec is a u64, so use the matching format width */
	seq_printf(m, "%llu\n", rwb->min_lat_nsec);
	return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->unknown_cnt);
	return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_normal);
	return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_background);
	return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
	{"enabled", 0400, wbt_enabled_show},
	{"id", 0400, wbt_id_show},
	{"inflight", 0400, wbt_inflight_show},
	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
	{"wb_normal", 0400, wbt_normal_show},
	{"wb_background", 0400, wbt_background_show},
	{},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
	.debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_queue_depth_changed(&rwb->rqos);
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}
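
/*
 * A sketch of the typical lifecycle, assuming a blk-mq device with
 * CONFIG_BLK_WBT_MQ enabled:
 *
 *	wbt_enable_default(q);		// at queue registration
 *	wbt_set_min_lat(q, 75000000);	// user writes queue/wb_lat_usec
 *	...
 *	wbt_exit(rqos);			// rq_qos teardown at queue release
 */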