/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * anything here, so we throttle down the queue depth.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * in case we need to increase/decrease the write cache.
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher depth of writeback.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

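/*
 * kswapd writeback and discards each get their own throttling queue;
 * all other buffered writeback shares the background queue.
 */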
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

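/*
 * How long has the oldest tracked sync request been in flight? Returns 0
 * if nothing is outstanding, or if the cookie has already completed.
 */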
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

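/*
 * Verdicts from latency_exceeded() on one stats window.
 */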
enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but this check catches a sync IO that is stuck behind a deep
	 * queue before it ever completes and shows up in the stats.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

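/*
 * Derive the throttling limits from the current max depth: normal
 * writeback may use about half of it, background writeback a quarter.
 * A zero latency target disables throttling entirely.
 */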
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

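/*
 * Step the queue depth up and wake all throttled waiters, so they can
 * re-check against the new, higher limits.
 */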
static void scale_up(struct rq_wb *rwb)
{
	rq_depth_scale_up(&rwb->rq_depth);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, "scale up");
}

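/*
 * Step the queue depth down. A hard throttle snaps a scaled-up depth
 * straight back to the default instead of stepping down gradually.
 */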
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, "scale down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * If we're scaled down, shrink the monitoring window too,
		 * so we react faster while throttled:
		 *
		 *	cur_win_nsec = win_nsec / sqrt(scale_step + 1)
		 *
		 * done in fixed point: <<4 over int_sqrt(x << 8) is the
		 * same as dividing by sqrt(x).
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * At the center step, or scaled up, keep the default window.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We don't have a valid read/write sample, but we do have
		 * writes going on. Allow step to go negative, to increase
		 * write performance.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when previously scaled reduced depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

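/*
 * Reset scaling to the center state and recompute the max depth and the
 * derived wb_normal/wb_background limits from it.
 */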
static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}

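/*
 * "Close" IO: something was issued or completed within the last 100ms.
 */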
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

struct wbt_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wb *rwb;
	struct rq_wait *rqw;
	unsigned long rw;
	bool got_token;
};

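/*
 * Called with the waitqueue lock held when a throttled waiter is woken.
 * Try to grab a queueing token on the waiter's behalf; returning -1 if
 * none is available stops the wakeup sweep in __wake_up_common().
 */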
static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			     int wake_flags, void *key)
{
	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
						  wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up
	 * loop in __wake_up_common.
	 */
	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
		return -1;

	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.wq = {
			.func	= wbt_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rwb = rwb,
		.rqw = rqw,
		.rw = rw,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
		if (data.got_token)
			break;

		if (!has_sleeper &&
		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with wbt_wake_function() getting a token,
			 * which means we now have two. Put our local token
			 * and wake anyone else potentially waiting for one.
			 */
			if (data.got_token)
				wbt_rqw_done(rwb, rqw, wb_acct);
			break;
		}

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();

		has_sleeper = false;
	} while (1);

	finish_wait(&rqw->wait, &data.wq);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

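/*
 * Map a bio to the wbt flags it should carry: reads are timestamped for
 * idle detection only, while throttled writes and discards are marked
 * WBT_TRACKED so their completion releases an inflight token.
 */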
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);

	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

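/*
 * Copy the bio's wbt accounting flags to the request, so completion of
 * the request can credit the right throttling queue.
 */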
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);

	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows
	 * us to react quicker, if a sync IO takes a long time to complete.
	 * Note that this is just a hint. The request can go away when it
	 * completes, so it's important we never dereference it. We only use
	 * the address to compare with, which is why we store the sync_issue
	 * time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (rqos) {
		RQWB(rqos)->rq_depth.queue_depth = depth;
		__wbt_update_limits(RQWB(rqos));
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

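/*
 * Stats bucketing callback: account reads and writes in separate
 * windows, ignore everything else.
 */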
static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;

	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->wb_normal = 0;
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.exit = wbt_exit,
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(q, blk_queue_depth(q));
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}