/*
 * buffered writeback throttling, loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   the scaling step and scale down the allowed queue depth. The monitoring
 *   window is then shrunk to 100msec / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain the status quo.
 * - If latencies look good, decrement the scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   temporarily boosts write performance, snapping back to a stable scaling
 *   step of 0 if reads show up or the heavy writers finish. Negative steps
 *   don't shrink the monitoring window.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

enum {
	/*
	 * Default request depth to start from. calc_wb_limits() scales this
	 * down (to a minimum of 1) or up (to at most 75% of the device queue
	 * depth) depending on the current scaling step.
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec monitoring window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats for a window if we don't have at least this many
	 * write samples.
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we get this many consecutive windows without enough information
	 * to scale up or down, drift the scaling step back towards its
	 * default of 0.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

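/*
 * wbt counts as enabled once limits have been calculated; a min_lat_nsec of
 * 0 zeroes the limits in calc_wb_limits() and effectively disables throttling.
 */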
static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

/*
 * Increment 'v' if it is below 'below'. Returns true if the increment
 * happened, false if 'v' + 1 would have exceeded 'below'.
 */
static bool atomic_inc_below(atomic_t *v, int below)
{
	int cur = atomic_read(v);

	for (;;) {
		int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

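/*
 * kswapd-issued writes are tracked and throttled on their own wait queue,
 * separate from all other buffered writeback.
 */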
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
{
	return &rwb->rq_wait[is_kswapd];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (waitqueue_active(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
{
	struct rq_wait *rqw;
	int inflight, limit;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential waiters,
	 * we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * If the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (waitqueue_active(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

/*
 * Called on completion of a request. Untracked requests only update the
 * sync issue tracking and the read completion timestamp; tracked requests
 * drop their inflight count and wake up anyone waiting for a slot.
 */
void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
	if (!rwb)
		return;

	if (!wbt_is_tracked(stat)) {
		if (rwb->sync_cookie == stat) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(stat))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(stat == rwb->sync_cookie);
		__wbt_done(rwb, wbt_stat_to_mask(stat));
	}
	wbt_clear_state(stat);
}

/*
 * Recompute wb_max/wb_normal/wb_background from the queue depth and the
 * current scaling step. Returns true if we can't increase the depth any
 * further by scaling up.
 */
static bool calc_wb_limits(struct rq_wb *rwb)
{
	unsigned int depth;
	bool ret = false;

	if (!rwb->min_lat_nsec) {
		rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
		return false;
	}

	/*
	 * QD=1 devices are a special case. It's important for those to have
	 * one request ready to queue when another completes, so allow a
	 * depth of 2 unless we have already had to scale down, in which case
	 * stick to a depth of 1.
	 */
	if (rwb->queue_depth == 1) {
		if (rwb->scale_step > 0)
			rwb->wb_max = rwb->wb_normal = 1;
		else {
			rwb->wb_max = rwb->wb_normal = 2;
			ret = true;
		}
		rwb->wb_background = 1;
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
		if (rwb->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
		else if (rwb->scale_step < 0) {
			unsigned int maxd = 3 * rwb->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rwb->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		/*
		 * Set our max/normal/bg queue depths based on how far
		 * we have scaled down (->scale_step).
		 */
		rwb->wb_max = depth;
		rwb->wb_normal = (rwb->wb_max + 1) / 2;
		rwb->wb_background = (rwb->wb_max + 3) / 4;
	}

	return ret;
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES write samples. The write samples tell us
	 * that writes are actually impacting us, and it's not just a lone
	 * read on an otherwise idle queue.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

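/*
 * How long the currently tracked sync read has been in flight, in nsecs.
 * Returns 0 if we aren't tracking one.
 */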
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it exceeds
	 * our min target AND we haven't logged any entries, flag the latency
	 * as exceeded. wbt works off completion latencies, but for a flooded
	 * device, a single sync IO can take a long time to complete after
	 * being issued. If this time exceeds our monitoring window AND we
	 * didn't see any other completions in that window, then count that
	 * sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rwb->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;

	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rwb->wb_max);
}

static void scale_up(struct rq_wb *rwb)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rwb->scaled_max)
		return;

	rwb->scale_step--;
	rwb->unknown_cnt = 0;

	rwb->scaled_max = calc_wb_limits(rwb);

	rwb_wake_all(rwb);

	rwb_trace_step(rwb, "step up");
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rwb->wb_max == 1)
		return;

	if (rwb->scale_step < 0 && hard_throttle)
		rwb->scale_step = 0;
	else
		rwb->scale_step++;

	rwb->scaled_max = false;
	rwb->unknown_cnt = 0;
	calc_wb_limits(rwb);
	rwb_trace_step(rwb, "step down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	if (rwb->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 *
		 * (win_nsec << 4) / int_sqrt((step + 1) << 8) is fixed point
		 * math for win_nsec / sqrt(step + 1).
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rwb->scale_step + 1) << 8));
	} else {
		/*
		 * For step <= 0, we don't want to shrink the window.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not, step
	 * one level up. If we don't know enough to say either exceeded or
	 * ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We don't have a valid read/write sample, but we do have
		 * writes going on. Allow the step to go negative, to
		 * increase write performance.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to the center state (step == 0).
		 */
		if (rwb->scale_step > 0)
			scale_up(rwb);
		else if (rwb->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rwb->scale_step || inflight)
		rwb_arm_timer(rwb);
}

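/*
 * Reset scaling back to its default state and recompute the depth limits;
 * called when the queue depth or the latency target changes.
 */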
void wbt_update_limits(struct rq_wb *rwb)
{
	rwb->scale_step = 0;
	rwb->scaled_max = false;
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

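/*
 * Return true if we issued or completed other IO within the last
 * 100msec (HZ / 10).
 */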
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
	       time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * At this point we know it's a buffered write. If this is kswapd
	 * trying to free memory, or REQ_SYNC is set, then it's WB_SYNC_ALL
	 * writeback, and we'll use the max limit for that. If the write is
	 * marked as a background write, then use the idle limit, or go to
	 * normal if we haven't had competing IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->wb_max;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

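/*
 * Try to account another write against the current limit. Returns true if
 * the caller got a slot, false if it has to go (back) to sleep.
 */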
static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
			     wait_queue_entry_t *wait, unsigned long rw)
{
	/*
	 * inc it here even if disabled, since we'll dec it at completion.
	 * this only happens if the task was sleeping in __wbt_wait(),
	 * and someone turned it off at the same time.
	 */
	if (!rwb_enabled(rwb)) {
		atomic_inc(&rqw->inflight);
		return true;
	}

	/*
	 * If the waitqueue is already active and we are not the next
	 * in line to be woken up, wait for our turn.
	 */
	if (waitqueue_active(&rqw->wait) &&
	    rqw->wait.head.next != &wait->entry)
		return false;

	return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
	DEFINE_WAIT(wait);

	if (may_queue(rwb, rqw, &wait, rw))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
						TASK_UNINTERRUPTIBLE);

		if (may_queue(rwb, rqw, &wait, rw))
			break;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();
	} while (1);

	finish_wait(&rqw->wait, &wait);
}

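/*
 * Decide whether a bio is subject to throttling at all: only buffered
 * writes are, everything else passes straight through.
 */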
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	const int op = bio_op(bio);

	/*
	 * If not a WRITE, do nothing
	 */
	if (op != REQ_OP_WRITE)
		return false;

	/*
	 * Don't throttle WRITE_ODIRECT
	 */
	if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
		return false;

	return true;
}

/*
 * Returns a mask of flags describing how the IO was accounted. May sleep,
 * if we have exceeded the writeback limits. The caller can pass in an
 * irq-held queue lock, which is dropped across the sleep.
 */
enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
{
	unsigned int ret = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ)
		ret = WBT_READ;

	if (!wbt_should_throttle(rwb, bio)) {
		if (ret & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return ret;
	}

	__wbt_wait(rwb, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);

	if (current_is_kswapd())
		ret |= WBT_KSWAPD;

	return ret | WBT_TRACKED;
}

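/*
 * Called when a request is issued to the device.
 */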
void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows
	 * us to react quicker, if a sync IO takes a long time to complete.
	 * Note that this is just a hint. The request can go away when it
	 * completes, so it's important we never dereference it. We only use
	 * the address to compare with, which is why we store the sync_issue
	 * time locally.
	 */
	if (wbt_is_read(stat) && !rwb->sync_issue) {
		rwb->sync_cookie = stat;
		rwb->sync_issue = blk_stat_time(stat);
	}
}

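/*
 * A request was requeued; if it was our sync issue cookie, forget it so it
 * isn't counted as a latency violation while it waits to be reissued.
 */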
void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
	if (!rwb_enabled(rwb))
		return;
	if (stat == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
	if (rwb) {
		rwb->queue_depth = depth;
		wbt_update_limits(rwb);
	}
}

void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
{
	if (rwb)
		rwb->wc = write_cache_on;
}

/*
 * Disable wbt if it was only enabled by default, and not explicitly
 * enabled through sysfs.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_wb *rwb = q->rq_wb;

	if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
		wbt_exit(q);
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

/*
 * Enable wbt if defaults are configured that way.
 */
void wbt_enable_default(struct request_queue *q)
{
	/* Throttling already enabled? */
	if (q->rq_wb)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

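/*
 * Bucket callback for the blk-stat window: reads and writes (including
 * flushes) are accumulated separately, everything else isn't accounted.
 */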
static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
		return WRITE;

	/* don't account */
	return -1;
}

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		atomic_set(&rwb->rq_wait[i].inflight, 0);
		init_waitqueue_head(&rwb->rq_wait[i].wait);
	}

	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->queue = q;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	q->rq_wb = rwb;
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(rwb, blk_queue_depth(q));
	wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}

void wbt_exit(struct request_queue *q)
{
	struct rq_wb *rwb = q->rq_wb;

	if (rwb) {
		blk_stat_remove_callback(q, rwb->cb);
		blk_stat_free_callback(rwb->cb);
		q->rq_wb = NULL;
		kfree(rwb);
	}
}