linux/block/bfq-iosched.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Budget Fair Queueing (BFQ) I/O scheduler.
   4 *
   5 * Based on ideas and code from CFQ:
   6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   7 *
   8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   9 *                    Paolo Valente <paolo.valente@unimore.it>
  10 *
  11 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
  12 *                    Arianna Avanzini <avanzini@google.com>
  13 *
  14 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
  15 *
  16 * BFQ is a proportional-share I/O scheduler, with some extra
  17 * low-latency capabilities. BFQ also supports full hierarchical
  18 * scheduling through cgroups. Next paragraphs provide an introduction
  19 * on BFQ inner workings. Details on BFQ benefits, usage and
  20 * limitations can be found in Documentation/block/bfq-iosched.txt.
  21 *
  22 * BFQ is a proportional-share storage-I/O scheduling algorithm based
  23 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
  24 * budgets, measured in number of sectors, to processes instead of
  25 * time slices. The device is not granted to the in-service process
  26 * for a given time slice, but until it has exhausted its assigned
  27 * budget. This change from the time to the service domain enables BFQ
  28 * to distribute the device throughput among processes as desired,
  29 * without any distortion due to throughput fluctuations, or to device
  30 * internal queueing. BFQ uses an ad hoc internal scheduler, called
  31 * B-WF2Q+, to schedule processes according to their budgets. More
  32 * precisely, BFQ schedules queues associated with processes. Each
  33 * process/queue is assigned a user-configurable weight, and B-WF2Q+
  34 * guarantees that each queue receives a fraction of the throughput
  35 * proportional to its weight. Thanks to the accurate policy of
  36 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
  37 * processes issuing sequential requests (to boost the throughput),
  38 * and yet guarantee a low latency to interactive and soft real-time
  39 * applications.
  40 *
  41 * In particular, to provide these low-latency guarantees, BFQ
  42 * explicitly privileges the I/O of two classes of time-sensitive
  43 * applications: interactive and soft real-time. In more detail, BFQ
  44 * behaves this way if the low_latency parameter is set (default
  45 * configuration). This feature enables BFQ to provide applications in
  46 * these classes with a very low latency.
  47 *
  48 * To implement this feature, BFQ constantly tries to detect whether
  49 * the I/O requests in a bfq_queue come from an interactive or a soft
  50 * real-time application. For brevity, in these cases, the queue is
  51 * said to be interactive or soft real-time. In both cases, BFQ
  52 * privileges the service of the queue, over that of non-interactive
  53 * and non-soft-real-time queues. This privileging is performed,
  54 * mainly, by raising the weight of the queue. So, for brevity, we
  55 * call just weight-raising periods the time periods during which a
  56 * queue is privileged, because deemed interactive or soft real-time.
  57 *
  58 * The detection of soft real-time queues/applications is described in
  59 * detail in the comments on the function
  60 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
  61 * interactive queue works as follows: a queue is deemed interactive
  62 * if it is constantly non empty only for a limited time interval,
  63 * after which it does become empty. The queue may be deemed
  64 * interactive again (for a limited time), if it restarts being
  65 * constantly non empty, provided that this happens only after the
  66 * queue has remained empty for a given minimum idle time.
  67 *
  68 * By default, BFQ computes automatically the above maximum time
  69 * interval, i.e., the time interval after which a constantly
  70 * non-empty queue stops being deemed interactive. Since a queue is
  71 * weight-raised while it is deemed interactive, this maximum time
  72 * interval happens to coincide with the (maximum) duration of the
  73 * weight-raising for interactive queues.
  74 *
  75 * Finally, BFQ also features additional heuristics for
  76 * preserving both a low latency and a high throughput on NCQ-capable,
  77 * rotational or flash-based devices, and to get the job done quickly
  78 * for applications consisting in many I/O-bound processes.
  79 *
  80 * NOTE: if the main or only goal, with a given device, is to achieve
  81 * the maximum-possible throughput at all times, then do switch off
  82 * all low-latency heuristics for that device, by setting low_latency
  83 * to 0.
  84 *
  85 * BFQ is described in [1], where also a reference to the initial,
  86 * more theoretical paper on BFQ can be found. The interested reader
  87 * can find in the latter paper full details on the main algorithm, as
  88 * well as formulas of the guarantees and formal proofs of all the
  89 * properties.  With respect to the version of BFQ presented in these
  90 * papers, this implementation adds a few more heuristics, such as the
  91 * ones that guarantee a low latency to interactive and soft real-time
  92 * applications, and a hierarchical extension based on H-WF2Q+.
  93 *
  94 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
  95 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
  96 * with O(log N) complexity derives from the one introduced with EEVDF
  97 * in [3].
  98 *
  99 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 100 *     Scheduler", Proceedings of the First Workshop on Mobile System
 101 *     Technologies (MST-2015), May 2015.
 102 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 103 *
 104 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 105 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 106 *     Oct 1997.
 107 *
 108 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 109 *
 110 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 111 *     First: A Flexible and Accurate Mechanism for Proportional Share
 112 *     Resource Allocation", technical report.
 113 *
 114 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 115 */
 116#include <linux/module.h>
 117#include <linux/slab.h>
 118#include <linux/blkdev.h>
 119#include <linux/cgroup.h>
 120#include <linux/elevator.h>
 121#include <linux/ktime.h>
 122#include <linux/rbtree.h>
 123#include <linux/ioprio.h>
 124#include <linux/sbitmap.h>
 125#include <linux/delay.h>
 126
 127#include "blk.h"
 128#include "blk-mq.h"
 129#include "blk-mq-tag.h"
 130#include "blk-mq-sched.h"
 131#include "bfq-iosched.h"
 132#include "blk-wbt.h"
 133
 134#define BFQ_BFQQ_FNS(name)                                              \
 135void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)                       \
 136{                                                                       \
 137        __set_bit(BFQQF_##name, &(bfqq)->flags);                        \
 138}                                                                       \
 139void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)                      \
 140{                                                                       \
 141        __clear_bit(BFQQF_##name, &(bfqq)->flags);              \
 142}                                                                       \
 143int bfq_bfqq_##name(const struct bfq_queue *bfqq)                       \
 144{                                                                       \
 145        return test_bit(BFQQF_##name, &(bfqq)->flags);          \
 146}
 147
 148BFQ_BFQQ_FNS(just_created);
 149BFQ_BFQQ_FNS(busy);
 150BFQ_BFQQ_FNS(wait_request);
 151BFQ_BFQQ_FNS(non_blocking_wait_rq);
 152BFQ_BFQQ_FNS(fifo_expire);
 153BFQ_BFQQ_FNS(has_short_ttime);
 154BFQ_BFQQ_FNS(sync);
 155BFQ_BFQQ_FNS(IO_bound);
 156BFQ_BFQQ_FNS(in_large_burst);
 157BFQ_BFQQ_FNS(coop);
 158BFQ_BFQQ_FNS(split_coop);
 159BFQ_BFQQ_FNS(softrt_update);
 160#undef BFQ_BFQQ_FNS                                             \
 161
 162/* Expiration time of sync (0) and async (1) requests, in ns. */
 163static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
 164
 165/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
 166static const int bfq_back_max = 16 * 1024;
 167
 168/* Penalty of a backwards seek, in number of sectors. */
 169static const int bfq_back_penalty = 2;
 170
 171/* Idling period duration, in ns. */
 172static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
 173
 174/* Minimum number of assigned budgets for which stats are safe to compute. */
 175static const int bfq_stats_min_budgets = 194;
 176
 177/* Default maximum budget values, in sectors and number of requests. */
 178static const int bfq_default_max_budget = 16 * 1024;
 179
 180/*
 181 * When a sync request is dispatched, the queue that contains that
 182 * request, and all the ancestor entities of that queue, are charged
 183 * with the number of sectors of the request. In contrast, if the
 184 * request is async, then the queue and its ancestor entities are
 185 * charged with the number of sectors of the request, multiplied by
 186 * the factor below. This throttles the bandwidth for async I/O,
 187 * w.r.t. to sync I/O, and it is done to counter the tendency of async
 188 * writes to steal I/O throughput to reads.
 189 *
 190 * The current value of this parameter is the result of a tuning with
 191 * several hardware and software configurations. We tried to find the
 192 * lowest value for which writes do not cause noticeable problems to
 193 * reads. In fact, the lower this parameter, the stabler I/O control,
 194 * in the following respect.  The lower this parameter is, the less
 195 * the bandwidth enjoyed by a group decreases
 196 * - when the group does writes, w.r.t. to when it does reads;
 197 * - when other groups do reads, w.r.t. to when they do writes.
 198 */
 199static const int bfq_async_charge_factor = 3;
 200
 201/* Default timeout values, in jiffies, approximating CFQ defaults. */
 202const int bfq_timeout = HZ / 8;
 203
 204/*
 205 * Time limit for merging (see comments in bfq_setup_cooperator). Set
 206 * to the slowest value that, in our tests, proved to be effective in
 207 * removing false positives, while not causing true positives to miss
 208 * queue merging.
 209 *
 210 * As can be deduced from the low time limit below, queue merging, if
 211 * successful, happens at the very beginning of the I/O of the involved
 212 * cooperating processes, as a consequence of the arrival of the very
 213 * first requests from each cooperator.  After that, there is very
 214 * little chance to find cooperators.
 215 */
 216static const unsigned long bfq_merge_time_limit = HZ/10;
 217
 218static struct kmem_cache *bfq_pool;
 219
 220/* Below this threshold (in ns), we consider thinktime immediate. */
 221#define BFQ_MIN_TT              (2 * NSEC_PER_MSEC)
 222
 223/* hw_tag detection: parallel requests threshold and min samples needed. */
 224#define BFQ_HW_QUEUE_THRESHOLD  3
 225#define BFQ_HW_QUEUE_SAMPLES    32
 226
 227#define BFQQ_SEEK_THR           (sector_t)(8 * 100)
 228#define BFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
 229#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
 230        (get_sdist(last_pos, rq) >                      \
 231         BFQQ_SEEK_THR &&                               \
 232         (!blk_queue_nonrot(bfqd->queue) ||             \
 233          blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
 234#define BFQQ_CLOSE_THR          (sector_t)(8 * 1024)
 235#define BFQQ_SEEKY(bfqq)        (hweight32(bfqq->seek_history) > 19)
 236/*
 237 * Sync random I/O is likely to be confused with soft real-time I/O,
 238 * because it is characterized by limited throughput and apparently
 239 * isochronous arrival pattern. To avoid false positives, queues
 240 * containing only random (seeky) I/O are prevented from being tagged
 241 * as soft real-time.
 242 */
 243#define BFQQ_TOTALLY_SEEKY(bfqq)        (bfqq->seek_history == -1)
 244
 245/* Min number of samples required to perform peak-rate update */
 246#define BFQ_RATE_MIN_SAMPLES    32
 247/* Min observation time interval required to perform a peak-rate update (ns) */
 248#define BFQ_RATE_MIN_INTERVAL   (300*NSEC_PER_MSEC)
 249/* Target observation time interval for a peak-rate update (ns) */
 250#define BFQ_RATE_REF_INTERVAL   NSEC_PER_SEC
 251
 252/*
 253 * Shift used for peak-rate fixed precision calculations.
 254 * With
 255 * - the current shift: 16 positions
 256 * - the current type used to store rate: u32
 257 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
 258 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
 259 * the range of rates that can be stored is
 260 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
 261 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
 262 * [15, 65G] sectors/sec
 263 * Which, assuming a sector size of 512B, corresponds to a range of
 264 * [7.5K, 33T] B/sec
 265 */
 266#define BFQ_RATE_SHIFT          16
 267
 268/*
 269 * When configured for computing the duration of the weight-raising
 270 * for interactive queues automatically (see the comments at the
 271 * beginning of this file), BFQ does it using the following formula:
 272 * duration = (ref_rate / r) * ref_wr_duration,
 273 * where r is the peak rate of the device, and ref_rate and
 274 * ref_wr_duration are two reference parameters.  In particular,
 275 * ref_rate is the peak rate of the reference storage device (see
 276 * below), and ref_wr_duration is about the maximum time needed, with
 277 * BFQ and while reading two files in parallel, to load typical large
 278 * applications on the reference device (see the comments on
 279 * max_service_from_wr below, for more details on how ref_wr_duration
 280 * is obtained).  In practice, the slower/faster the device at hand
 281 * is, the more/less it takes to load applications with respect to the
 282 * reference device.  Accordingly, the longer/shorter BFQ grants
 283 * weight raising to interactive applications.
 284 *
 285 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
 286 * depending on whether the device is rotational or non-rotational.
 287 *
 288 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
 289 * are the reference values for a rotational device, whereas
 290 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
 291 * non-rotational device. The reference rates are not the actual peak
 292 * rates of the devices used as a reference, but slightly lower
 293 * values. The reason for using slightly lower values is that the
 294 * peak-rate estimator tends to yield slightly lower values than the
 295 * actual peak rate (it can yield the actual peak rate only if there
 296 * is only one process doing I/O, and the process does sequential
 297 * I/O).
 298 *
 299 * The reference peak rates are measured in sectors/usec, left-shifted
 300 * by BFQ_RATE_SHIFT.
 301 */
 302static int ref_rate[2] = {14000, 33000};
 303/*
 304 * To improve readability, a conversion function is used to initialize
 305 * the following array, which entails that the array can be
 306 * initialized only in a function.
 307 */
 308static int ref_wr_duration[2];
 309
 310/*
 311 * BFQ uses the above-detailed, time-based weight-raising mechanism to
 312 * privilege interactive tasks. This mechanism is vulnerable to the
 313 * following false positives: I/O-bound applications that will go on
 314 * doing I/O for much longer than the duration of weight
 315 * raising. These applications have basically no benefit from being
 316 * weight-raised at the beginning of their I/O. On the opposite end,
 317 * while being weight-raised, these applications
 318 * a) unjustly steal throughput to applications that may actually need
 319 * low latency;
 320 * b) make BFQ uselessly perform device idling; device idling results
 321 * in loss of device throughput with most flash-based storage, and may
 322 * increase latencies when used purposelessly.
 323 *
 324 * BFQ tries to reduce these problems, by adopting the following
 325 * countermeasure. To introduce this countermeasure, we need first to
 326 * finish explaining how the duration of weight-raising for
 327 * interactive tasks is computed.
 328 *
 329 * For a bfq_queue deemed as interactive, the duration of weight
 330 * raising is dynamically adjusted, as a function of the estimated
 331 * peak rate of the device, so as to be equal to the time needed to
 332 * execute the 'largest' interactive task we benchmarked so far. By
 333 * largest task, we mean the task for which each involved process has
 334 * to do more I/O than for any of the other tasks we benchmarked. This
 335 * reference interactive task is the start-up of LibreOffice Writer,
 336 * and in this task each process/bfq_queue needs to have at most ~110K
 337 * sectors transferred.
 338 *
 339 * This last piece of information enables BFQ to reduce the actual
 340 * duration of weight-raising for at least one class of I/O-bound
 341 * applications: those doing sequential or quasi-sequential I/O. An
 342 * example is file copy. In fact, once started, the main I/O-bound
 343 * processes of these applications usually consume the above 110K
 344 * sectors in much less time than the processes of an application that
 345 * is starting, because these I/O-bound processes will greedily devote
 346 * almost all their CPU cycles only to their target,
 347 * throughput-friendly I/O operations. This is even more true if BFQ
 348 * happens to be underestimating the device peak rate, and thus
 349 * overestimating the duration of weight raising. But, according to
 350 * our measurements, once transferred 110K sectors, these processes
 351 * have no right to be weight-raised any longer.
 352 *
 353 * Basing on the last consideration, BFQ ends weight-raising for a
 354 * bfq_queue if the latter happens to have received an amount of
 355 * service at least equal to the following constant. The constant is
 356 * set to slightly more than 110K, to have a minimum safety margin.
 357 *
 358 * This early ending of weight-raising reduces the amount of time
 359 * during which interactive false positives cause the two problems
 360 * described at the beginning of these comments.
 361 */
 362static const unsigned long max_service_from_wr = 120000;
 363
 364#define RQ_BIC(rq)              icq_to_bic((rq)->elv.priv[0])
 365#define RQ_BFQQ(rq)             ((rq)->elv.priv[1])
 366
 367struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
 368{
 369        return bic->bfqq[is_sync];
 370}
 371
 372void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 373{
 374        bic->bfqq[is_sync] = bfqq;
 375}
 376
 377struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
 378{
 379        return bic->icq.q->elevator->elevator_data;
 380}
 381
 382/**
 383 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 384 * @icq: the iocontext queue.
 385 */
 386static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
 387{
 388        /* bic->icq is the first member, %NULL will convert to %NULL */
 389        return container_of(icq, struct bfq_io_cq, icq);
 390}
 391
 392/**
 393 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 394 * @bfqd: the lookup key.
 395 * @ioc: the io_context of the process doing I/O.
 396 * @q: the request queue.
 397 */
 398static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
 399                                        struct io_context *ioc,
 400                                        struct request_queue *q)
 401{
 402        if (ioc) {
 403                unsigned long flags;
 404                struct bfq_io_cq *icq;
 405
 406                spin_lock_irqsave(&q->queue_lock, flags);
 407                icq = icq_to_bic(ioc_lookup_icq(ioc, q));
 408                spin_unlock_irqrestore(&q->queue_lock, flags);
 409
 410                return icq;
 411        }
 412
 413        return NULL;
 414}
 415
 416/*
 417 * Scheduler run of queue, if there are requests pending and no one in the
 418 * driver that will restart queueing.
 419 */
 420void bfq_schedule_dispatch(struct bfq_data *bfqd)
 421{
 422        if (bfqd->queued != 0) {
 423                bfq_log(bfqd, "schedule dispatch");
 424                blk_mq_run_hw_queues(bfqd->queue, true);
 425        }
 426}
 427
 428#define bfq_class_idle(bfqq)    ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 429#define bfq_class_rt(bfqq)      ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
 430
 431#define bfq_sample_valid(samples)       ((samples) > 80)
 432
 433/*
 434 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 435 * We choose the request that is closer to the head right now.  Distance
 436 * behind the head is penalized and only allowed to a certain extent.
 437 */
 438static struct request *bfq_choose_req(struct bfq_data *bfqd,
 439                                      struct request *rq1,
 440                                      struct request *rq2,
 441                                      sector_t last)
 442{
 443        sector_t s1, s2, d1 = 0, d2 = 0;
 444        unsigned long back_max;
 445#define BFQ_RQ1_WRAP    0x01 /* request 1 wraps */
 446#define BFQ_RQ2_WRAP    0x02 /* request 2 wraps */
 447        unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
 448
 449        if (!rq1 || rq1 == rq2)
 450                return rq2;
 451        if (!rq2)
 452                return rq1;
 453
 454        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
 455                return rq1;
 456        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 457                return rq2;
 458        if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
 459                return rq1;
 460        else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
 461                return rq2;
 462
 463        s1 = blk_rq_pos(rq1);
 464        s2 = blk_rq_pos(rq2);
 465
 466        /*
 467         * By definition, 1KiB is 2 sectors.
 468         */
 469        back_max = bfqd->bfq_back_max * 2;
 470
 471        /*
 472         * Strict one way elevator _except_ in the case where we allow
 473         * short backward seeks which are biased as twice the cost of a
 474         * similar forward seek.
 475         */
 476        if (s1 >= last)
 477                d1 = s1 - last;
 478        else if (s1 + back_max >= last)
 479                d1 = (last - s1) * bfqd->bfq_back_penalty;
 480        else
 481                wrap |= BFQ_RQ1_WRAP;
 482
 483        if (s2 >= last)
 484                d2 = s2 - last;
 485        else if (s2 + back_max >= last)
 486                d2 = (last - s2) * bfqd->bfq_back_penalty;
 487        else
 488                wrap |= BFQ_RQ2_WRAP;
 489
 490        /* Found required data */
 491
 492        /*
 493         * By doing switch() on the bit mask "wrap" we avoid having to
 494         * check two variables for all permutations: --> faster!
 495         */
 496        switch (wrap) {
 497        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
 498                if (d1 < d2)
 499                        return rq1;
 500                else if (d2 < d1)
 501                        return rq2;
 502
 503                if (s1 >= s2)
 504                        return rq1;
 505                else
 506                        return rq2;
 507
 508        case BFQ_RQ2_WRAP:
 509                return rq1;
 510        case BFQ_RQ1_WRAP:
 511                return rq2;
 512        case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
 513        default:
 514                /*
 515                 * Since both rqs are wrapped,
 516                 * start with the one that's further behind head
 517                 * (--> only *one* back seek required),
 518                 * since back seek takes more time than forward.
 519                 */
 520                if (s1 <= s2)
 521                        return rq1;
 522                else
 523                        return rq2;
 524        }
 525}
 526
 527/*
 528 * Async I/O can easily starve sync I/O (both sync reads and sync
 529 * writes), by consuming all tags. Similarly, storms of sync writes,
 530 * such as those that sync(2) may trigger, can starve sync reads.
 531 * Limit depths of async I/O and sync writes so as to counter both
 532 * problems.
 533 */
 534static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 535{
 536        struct bfq_data *bfqd = data->q->elevator->elevator_data;
 537
 538        if (op_is_sync(op) && !op_is_write(op))
 539                return;
 540
 541        data->shallow_depth =
 542                bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
 543
 544        bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
 545                        __func__, bfqd->wr_busy_queues, op_is_sync(op),
 546                        data->shallow_depth);
 547}
 548
 549static struct bfq_queue *
 550bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
 551                     sector_t sector, struct rb_node **ret_parent,
 552                     struct rb_node ***rb_link)
 553{
 554        struct rb_node **p, *parent;
 555        struct bfq_queue *bfqq = NULL;
 556
 557        parent = NULL;
 558        p = &root->rb_node;
 559        while (*p) {
 560                struct rb_node **n;
 561
 562                parent = *p;
 563                bfqq = rb_entry(parent, struct bfq_queue, pos_node);
 564
 565                /*
 566                 * Sort strictly based on sector. Smallest to the left,
 567                 * largest to the right.
 568                 */
 569                if (sector > blk_rq_pos(bfqq->next_rq))
 570                        n = &(*p)->rb_right;
 571                else if (sector < blk_rq_pos(bfqq->next_rq))
 572                        n = &(*p)->rb_left;
 573                else
 574                        break;
 575                p = n;
 576                bfqq = NULL;
 577        }
 578
 579        *ret_parent = parent;
 580        if (rb_link)
 581                *rb_link = p;
 582
 583        bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
 584                (unsigned long long)sector,
 585                bfqq ? bfqq->pid : 0);
 586
 587        return bfqq;
 588}
 589
 590static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
 591{
 592        return bfqq->service_from_backlogged > 0 &&
 593                time_is_before_jiffies(bfqq->first_IO_time +
 594                                       bfq_merge_time_limit);
 595}
 596
 597/*
 598 * The following function is not marked as __cold because it is
 599 * actually cold, but for the same performance goal described in the
 600 * comments on the likely() at the beginning of
 601 * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
 602 * execution time for the case where this function is not invoked, we
 603 * had to add an unlikely() in each involved if().
 604 */
 605void __cold
 606bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 607{
 608        struct rb_node **p, *parent;
 609        struct bfq_queue *__bfqq;
 610
 611        if (bfqq->pos_root) {
 612                rb_erase(&bfqq->pos_node, bfqq->pos_root);
 613                bfqq->pos_root = NULL;
 614        }
 615
 616        /*
 617         * bfqq cannot be merged any longer (see comments in
 618         * bfq_setup_cooperator): no point in adding bfqq into the
 619         * position tree.
 620         */
 621        if (bfq_too_late_for_merging(bfqq))
 622                return;
 623
 624        if (bfq_class_idle(bfqq))
 625                return;
 626        if (!bfqq->next_rq)
 627                return;
 628
 629        bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
 630        __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
 631                        blk_rq_pos(bfqq->next_rq), &parent, &p);
 632        if (!__bfqq) {
 633                rb_link_node(&bfqq->pos_node, parent, p);
 634                rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
 635        } else
 636                bfqq->pos_root = NULL;
 637}
 638
 639/*
 640 * The following function returns false either if every active queue
 641 * must receive the same share of the throughput (symmetric scenario),
 642 * or, as a special case, if bfqq must receive a share of the
 643 * throughput lower than or equal to the share that every other active
 644 * queue must receive.  If bfqq does sync I/O, then these are the only
 645 * two cases where bfqq happens to be guaranteed its share of the
 646 * throughput even if I/O dispatching is not plugged when bfqq remains
 647 * temporarily empty (for more details, see the comments in the
 648 * function bfq_better_to_idle()). For this reason, the return value
 649 * of this function is used to check whether I/O-dispatch plugging can
 650 * be avoided.
 651 *
 652 * The above first case (symmetric scenario) occurs when:
 653 * 1) all active queues have the same weight,
 654 * 2) all active queues belong to the same I/O-priority class,
 655 * 3) all active groups at the same level in the groups tree have the same
 656 *    weight,
 657 * 4) all active groups at the same level in the groups tree have the same
 658 *    number of children.
 659 *
 660 * Unfortunately, keeping the necessary state for evaluating exactly
 661 * the last two symmetry sub-conditions above would be quite complex
 662 * and time consuming. Therefore this function evaluates, instead,
 663 * only the following stronger three sub-conditions, for which it is
 664 * much easier to maintain the needed state:
 665 * 1) all active queues have the same weight,
 666 * 2) all active queues belong to the same I/O-priority class,
 667 * 3) there are no active groups.
 668 * In particular, the last condition is always true if hierarchical
 669 * support or the cgroups interface are not enabled, thus no state
 670 * needs to be maintained in this case.
 671 */
 672static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
 673                                   struct bfq_queue *bfqq)
 674{
 675        bool smallest_weight = bfqq &&
 676                bfqq->weight_counter &&
 677                bfqq->weight_counter ==
 678                container_of(
 679                        rb_first_cached(&bfqd->queue_weights_tree),
 680                        struct bfq_weight_counter,
 681                        weights_node);
 682
 683        /*
 684         * For queue weights to differ, queue_weights_tree must contain
 685         * at least two nodes.
 686         */
 687        bool varied_queue_weights = !smallest_weight &&
 688                !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
 689                (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
 690                 bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
 691
 692        bool multiple_classes_busy =
 693                (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
 694                (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
 695                (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
 696
 697        return varied_queue_weights || multiple_classes_busy
 698#ifdef CONFIG_BFQ_GROUP_IOSCHED
 699               || bfqd->num_groups_with_pending_reqs > 0
 700#endif
 701                ;
 702}
 703
 704/*
 705 * If the weight-counter tree passed as input contains no counter for
 706 * the weight of the input queue, then add that counter; otherwise just
 707 * increment the existing counter.
 708 *
 709 * Note that weight-counter trees contain few nodes in mostly symmetric
 710 * scenarios. For example, if all queues have the same weight, then the
 711 * weight-counter tree for the queues may contain at most one node.
 712 * This holds even if low_latency is on, because weight-raised queues
 713 * are not inserted in the tree.
 714 * In most scenarios, the rate at which nodes are created/destroyed
 715 * should be low too.
 716 */
 717void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 718                          struct rb_root_cached *root)
 719{
 720        struct bfq_entity *entity = &bfqq->entity;
 721        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
 722        bool leftmost = true;
 723
 724        /*
 725         * Do not insert if the queue is already associated with a
 726         * counter, which happens if:
 727         *   1) a request arrival has caused the queue to become both
 728         *      non-weight-raised, and hence change its weight, and
 729         *      backlogged; in this respect, each of the two events
 730         *      causes an invocation of this function,
 731         *   2) this is the invocation of this function caused by the
 732         *      second event. This second invocation is actually useless,
 733         *      and we handle this fact by exiting immediately. More
 734         *      efficient or clearer solutions might possibly be adopted.
 735         */
 736        if (bfqq->weight_counter)
 737                return;
 738
 739        while (*new) {
 740                struct bfq_weight_counter *__counter = container_of(*new,
 741                                                struct bfq_weight_counter,
 742                                                weights_node);
 743                parent = *new;
 744
 745                if (entity->weight == __counter->weight) {
 746                        bfqq->weight_counter = __counter;
 747                        goto inc_counter;
 748                }
 749                if (entity->weight < __counter->weight)
 750                        new = &((*new)->rb_left);
 751                else {
 752                        new = &((*new)->rb_right);
 753                        leftmost = false;
 754                }
 755        }
 756
 757        bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
 758                                       GFP_ATOMIC);
 759
 760        /*
 761         * In the unlucky event of an allocation failure, we just
 762         * exit. This will cause the weight of queue to not be
 763         * considered in bfq_asymmetric_scenario, which, in its turn,
 764         * causes the scenario to be deemed wrongly symmetric in case
 765         * bfqq's weight would have been the only weight making the
 766         * scenario asymmetric.  On the bright side, no unbalance will
 767         * however occur when bfqq becomes inactive again (the
 768         * invocation of this function is triggered by an activation
 769         * of queue).  In fact, bfq_weights_tree_remove does nothing
 770         * if !bfqq->weight_counter.
 771         */
 772        if (unlikely(!bfqq->weight_counter))
 773                return;
 774
 775        bfqq->weight_counter->weight = entity->weight;
 776        rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
 777        rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
 778                                leftmost);
 779
 780inc_counter:
 781        bfqq->weight_counter->num_active++;
 782        bfqq->ref++;
 783}
 784
 785/*
 786 * Decrement the weight counter associated with the queue, and, if the
 787 * counter reaches 0, remove the counter from the tree.
 788 * See the comments to the function bfq_weights_tree_add() for considerations
 789 * about overhead.
 790 */
 791void __bfq_weights_tree_remove(struct bfq_data *bfqd,
 792                               struct bfq_queue *bfqq,
 793                               struct rb_root_cached *root)
 794{
 795        if (!bfqq->weight_counter)
 796                return;
 797
 798        bfqq->weight_counter->num_active--;
 799        if (bfqq->weight_counter->num_active > 0)
 800                goto reset_entity_pointer;
 801
 802        rb_erase_cached(&bfqq->weight_counter->weights_node, root);
 803        kfree(bfqq->weight_counter);
 804
 805reset_entity_pointer:
 806        bfqq->weight_counter = NULL;
 807        bfq_put_queue(bfqq);
 808}
 809
 810/*
 811 * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
 812 * of active groups for each queue's inactive parent entity.
 813 */
 814void bfq_weights_tree_remove(struct bfq_data *bfqd,
 815                             struct bfq_queue *bfqq)
 816{
 817        struct bfq_entity *entity = bfqq->entity.parent;
 818
 819        for_each_entity(entity) {
 820                struct bfq_sched_data *sd = entity->my_sched_data;
 821
 822                if (sd->next_in_service || sd->in_service_entity) {
 823                        /*
 824                         * entity is still active, because either
 825                         * next_in_service or in_service_entity is not
 826                         * NULL (see the comments on the definition of
 827                         * next_in_service for details on why
 828                         * in_service_entity must be checked too).
 829                         *
 830                         * As a consequence, its parent entities are
 831                         * active as well, and thus this loop must
 832                         * stop here.
 833                         */
 834                        break;
 835                }
 836
 837                /*
 838                 * The decrement of num_groups_with_pending_reqs is
 839                 * not performed immediately upon the deactivation of
 840                 * entity, but it is delayed to when it also happens
 841                 * that the first leaf descendant bfqq of entity gets
 842                 * all its pending requests completed. The following
 843                 * instructions perform this delayed decrement, if
 844                 * needed. See the comments on
 845                 * num_groups_with_pending_reqs for details.
 846                 */
 847                if (entity->in_groups_with_pending_reqs) {
 848                        entity->in_groups_with_pending_reqs = false;
 849                        bfqd->num_groups_with_pending_reqs--;
 850                }
 851        }
 852
 853        /*
 854         * Next function is invoked last, because it causes bfqq to be
 855         * freed if the following holds: bfqq is not in service and
 856         * has no dispatched request. DO NOT use bfqq after the next
 857         * function invocation.
 858         */
 859        __bfq_weights_tree_remove(bfqd, bfqq,
 860                                  &bfqd->queue_weights_tree);
 861}
 862
 863/*
 864 * Return expired entry, or NULL to just start from scratch in rbtree.
 865 */
 866static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
 867                                      struct request *last)
 868{
 869        struct request *rq;
 870
 871        if (bfq_bfqq_fifo_expire(bfqq))
 872                return NULL;
 873
 874        bfq_mark_bfqq_fifo_expire(bfqq);
 875
 876        rq = rq_entry_fifo(bfqq->fifo.next);
 877
 878        if (rq == last || ktime_get_ns() < rq->fifo_time)
 879                return NULL;
 880
 881        bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
 882        return rq;
 883}
 884
 885static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
 886                                        struct bfq_queue *bfqq,
 887                                        struct request *last)
 888{
 889        struct rb_node *rbnext = rb_next(&last->rb_node);
 890        struct rb_node *rbprev = rb_prev(&last->rb_node);
 891        struct request *next, *prev = NULL;
 892
 893        /* Follow expired path, else get first next available. */
 894        next = bfq_check_fifo(bfqq, last);
 895        if (next)
 896                return next;
 897
 898        if (rbprev)
 899                prev = rb_entry_rq(rbprev);
 900
 901        if (rbnext)
 902                next = rb_entry_rq(rbnext);
 903        else {
 904                rbnext = rb_first(&bfqq->sort_list);
 905                if (rbnext && rbnext != &last->rb_node)
 906                        next = rb_entry_rq(rbnext);
 907        }
 908
 909        return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
 910}
 911
 912/* see the definition of bfq_async_charge_factor for details */
 913static unsigned long bfq_serv_to_charge(struct request *rq,
 914                                        struct bfq_queue *bfqq)
 915{
 916        if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
 917            bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
 918                return blk_rq_sectors(rq);
 919
 920        return blk_rq_sectors(rq) * bfq_async_charge_factor;
 921}
 922
 923/**
 924 * bfq_updated_next_req - update the queue after a new next_rq selection.
 925 * @bfqd: the device data the queue belongs to.
 926 * @bfqq: the queue to update.
 927 *
 928 * If the first request of a queue changes we make sure that the queue
 929 * has enough budget to serve at least its first request (if the
 930 * request has grown).  We do this because if the queue has not enough
 931 * budget for its first request, it has to go through two dispatch
 932 * rounds to actually get it dispatched.
 933 */
 934static void bfq_updated_next_req(struct bfq_data *bfqd,
 935                                 struct bfq_queue *bfqq)
 936{
 937        struct bfq_entity *entity = &bfqq->entity;
 938        struct request *next_rq = bfqq->next_rq;
 939        unsigned long new_budget;
 940
 941        if (!next_rq)
 942                return;
 943
 944        if (bfqq == bfqd->in_service_queue)
 945                /*
 946                 * In order not to break guarantees, budgets cannot be
 947                 * changed after an entity has been selected.
 948                 */
 949                return;
 950
 951        new_budget = max_t(unsigned long,
 952                           max_t(unsigned long, bfqq->max_budget,
 953                                 bfq_serv_to_charge(next_rq, bfqq)),
 954                           entity->service);
 955        if (entity->budget != new_budget) {
 956                entity->budget = new_budget;
 957                bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
 958                                         new_budget);
 959                bfq_requeue_bfqq(bfqd, bfqq, false);
 960        }
 961}
 962
 963static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
 964{
 965        u64 dur;
 966
 967        if (bfqd->bfq_wr_max_time > 0)
 968                return bfqd->bfq_wr_max_time;
 969
 970        dur = bfqd->rate_dur_prod;
 971        do_div(dur, bfqd->peak_rate);
 972
 973        /*
 974         * Limit duration between 3 and 25 seconds. The upper limit
 975         * has been conservatively set after the following worst case:
 976         * on a QEMU/KVM virtual machine
 977         * - running in a slow PC
 978         * - with a virtual disk stacked on a slow low-end 5400rpm HDD
 979         * - serving a heavy I/O workload, such as the sequential reading
 980         *   of several files
 981         * mplayer took 23 seconds to start, if constantly weight-raised.
 982         *
 983         * As for higher values than that accommodating the above bad
 984         * scenario, tests show that higher values would often yield
 985         * the opposite of the desired result, i.e., would worsen
 986         * responsiveness by allowing non-interactive applications to
 987         * preserve weight raising for too long.
 988         *
 989         * On the other end, lower values than 3 seconds make it
 990         * difficult for most interactive tasks to complete their jobs
 991         * before weight-raising finishes.
 992         */
 993        return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
 994}
 995
 996/* switch back from soft real-time to interactive weight raising */
 997static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
 998                                          struct bfq_data *bfqd)
 999{
1000        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1001        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1002        bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
1003}
1004
1005static void
1006bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1007                      struct bfq_io_cq *bic, bool bfq_already_existing)
1008{
1009        unsigned int old_wr_coeff = bfqq->wr_coeff;
1010        bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
1011
1012        if (bic->saved_has_short_ttime)
1013                bfq_mark_bfqq_has_short_ttime(bfqq);
1014        else
1015                bfq_clear_bfqq_has_short_ttime(bfqq);
1016
1017        if (bic->saved_IO_bound)
1018                bfq_mark_bfqq_IO_bound(bfqq);
1019        else
1020                bfq_clear_bfqq_IO_bound(bfqq);
1021
1022        bfqq->entity.new_weight = bic->saved_weight;
1023        bfqq->ttime = bic->saved_ttime;
1024        bfqq->wr_coeff = bic->saved_wr_coeff;
1025        bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
1026        bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
1027        bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
1028
1029        if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
1030            time_is_before_jiffies(bfqq->last_wr_start_finish +
1031                                   bfqq->wr_cur_max_time))) {
1032                if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
1033                    !bfq_bfqq_in_large_burst(bfqq) &&
1034                    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
1035                                             bfq_wr_duration(bfqd))) {
1036                        switch_back_to_interactive_wr(bfqq, bfqd);
1037                } else {
1038                        bfqq->wr_coeff = 1;
1039                        bfq_log_bfqq(bfqq->bfqd, bfqq,
1040                                     "resume state: switching off wr");
1041                }
1042        }
1043
1044        /* make sure weight will be updated, however we got here */
1045        bfqq->entity.prio_changed = 1;
1046
1047        if (likely(!busy))
1048                return;
1049
1050        if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
1051                bfqd->wr_busy_queues++;
1052        else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
1053                bfqd->wr_busy_queues--;
1054}
1055
1056static int bfqq_process_refs(struct bfq_queue *bfqq)
1057{
1058        return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
1059                (bfqq->weight_counter != NULL);
1060}
1061
1062/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
1063static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1064{
1065        struct bfq_queue *item;
1066        struct hlist_node *n;
1067
1068        hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
1069                hlist_del_init(&item->burst_list_node);
1070
1071        /*
1072         * Start the creation of a new burst list only if there is no
1073         * active queue. See comments on the conditional invocation of
1074         * bfq_handle_burst().
1075         */
1076        if (bfq_tot_busy_queues(bfqd) == 0) {
1077                hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1078                bfqd->burst_size = 1;
1079        } else
1080                bfqd->burst_size = 0;
1081
1082        bfqd->burst_parent_entity = bfqq->entity.parent;
1083}
1084
1085/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
1086static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1087{
1088        /* Increment burst size to take into account also bfqq */
1089        bfqd->burst_size++;
1090
1091        if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
1092                struct bfq_queue *pos, *bfqq_item;
1093                struct hlist_node *n;
1094
1095                /*
1096                 * Enough queues have been activated shortly after each
1097                 * other to consider this burst as large.
1098                 */
1099                bfqd->large_burst = true;
1100
1101                /*
1102                 * We can now mark all queues in the burst list as
1103                 * belonging to a large burst.
1104                 */
1105                hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
1106                                     burst_list_node)
1107                        bfq_mark_bfqq_in_large_burst(bfqq_item);
1108                bfq_mark_bfqq_in_large_burst(bfqq);
1109
1110                /*
1111                 * From now on, and until the current burst finishes, any
1112                 * new queue being activated shortly after the last queue
1113                 * was inserted in the burst can be immediately marked as
1114                 * belonging to a large burst. So the burst list is not
1115                 * needed any more. Remove it.
1116                 */
1117                hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
1118                                          burst_list_node)
1119                        hlist_del_init(&pos->burst_list_node);
1120        } else /*
1121                * Burst not yet large: add bfqq to the burst list. Do
1122                * not increment the ref counter for bfqq, because bfqq
1123                * is removed from the burst list before freeing bfqq
1124                * in put_queue.
1125                */
1126                hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1127}
1128
1129/*
1130 * If many queues belonging to the same group happen to be created
1131 * shortly after each other, then the processes associated with these
1132 * queues have typically a common goal. In particular, bursts of queue
1133 * creations are usually caused by services or applications that spawn
1134 * many parallel threads/processes. Examples are systemd during boot,
1135 * or git grep. To help these processes get their job done as soon as
1136 * possible, it is usually better to not grant either weight-raising
1137 * or device idling to their queues, unless these queues must be
1138 * protected from the I/O flowing through other active queues.
1139 *
1140 * In this comment we describe, firstly, the reasons why this fact
1141 * holds, and, secondly, the next function, which implements the main
1142 * steps needed to properly mark these queues so that they can then be
1143 * treated in a different way.
1144 *
1145 * The above services or applications benefit mostly from a high
1146 * throughput: the quicker the requests of the activated queues are
1147 * cumulatively served, the sooner the target job of these queues gets
1148 * completed. As a consequence, weight-raising any of these queues,
1149 * which also implies idling the device for it, is almost always
1150 * counterproductive, unless there are other active queues to isolate
1151 * these new queues from. If there no other active queues, then
1152 * weight-raising these new queues just lowers throughput in most
1153 * cases.
1154 *
1155 * On the other hand, a burst of queue creations may be caused also by
1156 * the start of an application that does not consist of a lot of
1157 * parallel I/O-bound threads. In fact, with a complex application,
1158 * several short processes may need to be executed to start-up the
1159 * application. In this respect, to start an application as quickly as
1160 * possible, the best thing to do is in any case to privilege the I/O
1161 * related to the application with respect to all other
1162 * I/O. Therefore, the best strategy to start as quickly as possible
1163 * an application that causes a burst of queue creations is to
1164 * weight-raise all the queues created during the burst. This is the
1165 * exact opposite of the best strategy for the other type of bursts.
1166 *
1167 * In the end, to take the best action for each of the two cases, the
1168 * two types of bursts need to be distinguished. Fortunately, this
1169 * seems relatively easy, by looking at the sizes of the bursts. In
1170 * particular, we found a threshold such that only bursts with a
1171 * larger size than that threshold are apparently caused by
1172 * services or commands such as systemd or git grep. For brevity,
1173 * hereafter we call just 'large' these bursts. BFQ *does not*
1174 * weight-raise queues whose creation occurs in a large burst. In
1175 * addition, for each of these queues BFQ performs or does not perform
1176 * idling depending on which choice boosts the throughput more. The
1177 * exact choice depends on the device and request pattern at
1178 * hand.
1179 *
1180 * Unfortunately, false positives may occur while an interactive task
1181 * is starting (e.g., an application is being started). The
1182 * consequence is that the queues associated with the task do not
1183 * enjoy weight raising as expected. Fortunately these false positives
1184 * are very rare. They typically occur if some service happens to
1185 * start doing I/O exactly when the interactive task starts.
1186 *
1187 * Turning back to the next function, it is invoked only if there are
1188 * no active queues (apart from active queues that would belong to the
1189 * same, possible burst bfqq would belong to), and it implements all
1190 * the steps needed to detect the occurrence of a large burst and to
1191 * properly mark all the queues belonging to it (so that they can then
1192 * be treated in a different way). This goal is achieved by
1193 * maintaining a "burst list" that holds, temporarily, the queues that
1194 * belong to the burst in progress. The list is then used to mark
1195 * these queues as belonging to a large burst if the burst does become
1196 * large. The main steps are the following.
1197 *
1198 * . when the very first queue is created, the queue is inserted into the
1199 *   list (as it could be the first queue in a possible burst)
1200 *
1201 * . if the current burst has not yet become large, and a queue Q that does
1202 *   not yet belong to the burst is activated shortly after the last time
1203 *   at which a new queue entered the burst list, then the function appends
1204 *   Q to the burst list
1205 *
1206 * . if, as a consequence of the previous step, the burst size reaches
1207 *   the large-burst threshold, then
1208 *
1209 *     . all the queues in the burst list are marked as belonging to a
1210 *       large burst
1211 *
1212 *     . the burst list is deleted; in fact, the burst list already served
1213 *       its purpose (keeping temporarily track of the queues in a burst,
1214 *       so as to be able to mark them as belonging to a large burst in the
1215 *       previous sub-step), and now is not needed any more
1216 *
1217 *     . the device enters a large-burst mode
1218 *
1219 * . if a queue Q that does not belong to the burst is created while
1220 *   the device is in large-burst mode and shortly after the last time
1221 *   at which a queue either entered the burst list or was marked as
1222 *   belonging to the current large burst, then Q is immediately marked
1223 *   as belonging to a large burst.
1224 *
1225 * . if a queue Q that does not belong to the burst is created a while
1226 *   later, i.e., not shortly after, than the last time at which a queue
1227 *   either entered the burst list or was marked as belonging to the
1228 *   current large burst, then the current burst is deemed as finished and:
1229 *
1230 *        . the large-burst mode is reset if set
1231 *
1232 *        . the burst list is emptied
1233 *
1234 *        . Q is inserted in the burst list, as Q may be the first queue
1235 *          in a possible new burst (then the burst list contains just Q
1236 *          after this step).
1237 */
1238static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1239{
1240        /*
1241         * If bfqq is already in the burst list or is part of a large
1242         * burst, or finally has just been split, then there is
1243         * nothing else to do.
1244         */
1245        if (!hlist_unhashed(&bfqq->burst_list_node) ||
1246            bfq_bfqq_in_large_burst(bfqq) ||
1247            time_is_after_eq_jiffies(bfqq->split_time +
1248                                     msecs_to_jiffies(10)))
1249                return;
1250
1251        /*
1252         * If bfqq's creation happens late enough, or bfqq belongs to
1253         * a different group than the burst group, then the current
1254         * burst is finished, and related data structures must be
1255         * reset.
1256         *
1257         * In this respect, consider the special case where bfqq is
1258         * the very first queue created after BFQ is selected for this
1259         * device. In this case, last_ins_in_burst and
1260         * burst_parent_entity are not yet significant when we get
1261         * here. But it is easy to verify that, whether or not the
1262         * following condition is true, bfqq will end up being
1263         * inserted into the burst list. In particular the list will
1264         * happen to contain only bfqq. And this is exactly what has
1265         * to happen, as bfqq may be the first queue of the first
1266         * burst.
1267         */
1268        if (time_is_before_jiffies(bfqd->last_ins_in_burst +
1269            bfqd->bfq_burst_interval) ||
1270            bfqq->entity.parent != bfqd->burst_parent_entity) {
1271                bfqd->large_burst = false;
1272                bfq_reset_burst_list(bfqd, bfqq);
1273                goto end;
1274        }
1275
1276        /*
1277         * If we get here, then bfqq is being activated shortly after the
1278         * last queue. So, if the current burst is also large, we can mark
1279         * bfqq as belonging to this large burst immediately.
1280         */
1281        if (bfqd->large_burst) {
1282                bfq_mark_bfqq_in_large_burst(bfqq);
1283                goto end;
1284        }
1285
1286        /*
1287         * If we get here, then a large-burst state has not yet been
1288         * reached, but bfqq is being activated shortly after the last
1289         * queue. Then we add bfqq to the burst.
1290         */
1291        bfq_add_to_burst(bfqd, bfqq);
1292end:
1293        /*
1294         * At this point, bfqq either has been added to the current
1295         * burst or has caused the current burst to terminate and a
1296         * possible new burst to start. In particular, in the second
1297         * case, bfqq has become the first queue in the possible new
1298         * burst.  In both cases last_ins_in_burst needs to be moved
1299         * forward.
1300         */
1301        bfqd->last_ins_in_burst = jiffies;
1302}
1303
1304static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1305{
1306        struct bfq_entity *entity = &bfqq->entity;
1307
1308        return entity->budget - entity->service;
1309}
1310
1311/*
1312 * If enough samples have been computed, return the current max budget
1313 * stored in bfqd, which is dynamically updated according to the
1314 * estimated disk peak rate; otherwise return the default max budget
1315 */
1316static int bfq_max_budget(struct bfq_data *bfqd)
1317{
1318        if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1319                return bfq_default_max_budget;
1320        else
1321                return bfqd->bfq_max_budget;
1322}
1323
1324/*
1325 * Return min budget, which is a fraction of the current or default
1326 * max budget (trying with 1/32)
1327 */
1328static int bfq_min_budget(struct bfq_data *bfqd)
1329{
1330        if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1331                return bfq_default_max_budget / 32;
1332        else
1333                return bfqd->bfq_max_budget / 32;
1334}
1335
1336/*
1337 * The next function, invoked after the input queue bfqq switches from
1338 * idle to busy, updates the budget of bfqq. The function also tells
1339 * whether the in-service queue should be expired, by returning
1340 * true. The purpose of expiring the in-service queue is to give bfqq
1341 * the chance to possibly preempt the in-service queue, and the reason
1342 * for preempting the in-service queue is to achieve one of the two
1343 * goals below.
1344 *
1345 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1346 * expired because it has remained idle. In particular, bfqq may have
1347 * expired for one of the following two reasons:
1348 *
1349 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1350 *   and did not make it to issue a new request before its last
1351 *   request was served;
1352 *
1353 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1354 *   a new request before the expiration of the idling-time.
1355 *
1356 * Even if bfqq has expired for one of the above reasons, the process
1357 * associated with the queue may be however issuing requests greedily,
1358 * and thus be sensitive to the bandwidth it receives (bfqq may have
1359 * remained idle for other reasons: CPU high load, bfqq not enjoying
1360 * idling, I/O throttling somewhere in the path from the process to
1361 * the I/O scheduler, ...). But if, after every expiration for one of
1362 * the above two reasons, bfqq has to wait for the service of at least
1363 * one full budget of another queue before being served again, then
1364 * bfqq is likely to get a much lower bandwidth or resource time than
1365 * its reserved ones. To address this issue, two countermeasures need
1366 * to be taken.
1367 *
1368 * First, the budget and the timestamps of bfqq need to be updated in
1369 * a special way on bfqq reactivation: they need to be updated as if
1370 * bfqq did not remain idle and did not expire. In fact, if they are
1371 * computed as if bfqq expired and remained idle until reactivation,
1372 * then the process associated with bfqq is treated as if, instead of
1373 * being greedy, it stopped issuing requests when bfqq remained idle,
1374 * and restarts issuing requests only on this reactivation. In other
1375 * words, the scheduler does not help the process recover the "service
1376 * hole" between bfqq expiration and reactivation. As a consequence,
1377 * the process receives a lower bandwidth than its reserved one. In
1378 * contrast, to recover this hole, the budget must be updated as if
1379 * bfqq was not expired at all before this reactivation, i.e., it must
1380 * be set to the value of the remaining budget when bfqq was
1381 * expired. Along the same line, timestamps need to be assigned the
1382 * value they had the last time bfqq was selected for service, i.e.,
1383 * before last expiration. Thus timestamps need to be back-shifted
1384 * with respect to their normal computation (see [1] for more details
1385 * on this tricky aspect).
1386 *
1387 * Secondly, to allow the process to recover the hole, the in-service
1388 * queue must be expired too, to give bfqq the chance to preempt it
1389 * immediately. In fact, if bfqq has to wait for a full budget of the
1390 * in-service queue to be completed, then it may become impossible to
1391 * let the process recover the hole, even if the back-shifted
1392 * timestamps of bfqq are lower than those of the in-service queue. If
1393 * this happens for most or all of the holes, then the process may not
1394 * receive its reserved bandwidth. In this respect, it is worth noting
1395 * that, being the service of outstanding requests unpreemptible, a
1396 * little fraction of the holes may however be unrecoverable, thereby
1397 * causing a little loss of bandwidth.
1398 *
1399 * The last important point is detecting whether bfqq does need this
1400 * bandwidth recovery. In this respect, the next function deems the
1401 * process associated with bfqq greedy, and thus allows it to recover
1402 * the hole, if: 1) the process is waiting for the arrival of a new
1403 * request (which implies that bfqq expired for one of the above two
1404 * reasons), and 2) such a request has arrived soon. The first
1405 * condition is controlled through the flag non_blocking_wait_rq,
1406 * while the second through the flag arrived_in_time. If both
1407 * conditions hold, then the function computes the budget in the
1408 * above-described special way, and signals that the in-service queue
1409 * should be expired. Timestamp back-shifting is done later in
1410 * __bfq_activate_entity.
1411 *
1412 * 2. Reduce latency. Even if timestamps are not backshifted to let
1413 * the process associated with bfqq recover a service hole, bfqq may
1414 * however happen to have, after being (re)activated, a lower finish
1415 * timestamp than the in-service queue.  That is, the next budget of
1416 * bfqq may have to be completed before the one of the in-service
1417 * queue. If this is the case, then preempting the in-service queue
1418 * allows this goal to be achieved, apart from the unpreemptible,
1419 * outstanding requests mentioned above.
1420 *
1421 * Unfortunately, regardless of which of the above two goals one wants
1422 * to achieve, service trees need first to be updated to know whether
1423 * the in-service queue must be preempted. To have service trees
1424 * correctly updated, the in-service queue must be expired and
1425 * rescheduled, and bfqq must be scheduled too. This is one of the
1426 * most costly operations (in future versions, the scheduling
1427 * mechanism may be re-designed in such a way to make it possible to
1428 * know whether preemption is needed without needing to update service
1429 * trees). In addition, queue preemptions almost always cause random
1430 * I/O, and thus loss of throughput. Because of these facts, the next
1431 * function adopts the following simple scheme to avoid both costly
1432 * operations and too frequent preemptions: it requests the expiration
1433 * of the in-service queue (unconditionally) only for queues that need
1434 * to recover a hole, or that either are weight-raised or deserve to
1435 * be weight-raised.
1436 */
1437static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1438                                                struct bfq_queue *bfqq,
1439                                                bool arrived_in_time,
1440                                                bool wr_or_deserves_wr)
1441{
1442        struct bfq_entity *entity = &bfqq->entity;
1443
1444        /*
1445         * In the next compound condition, we check also whether there
1446         * is some budget left, because otherwise there is no point in
1447         * trying to go on serving bfqq with this same budget: bfqq
1448         * would be expired immediately after being selected for
1449         * service. This would only cause useless overhead.
1450         */
1451        if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
1452            bfq_bfqq_budget_left(bfqq) > 0) {
1453                /*
1454                 * We do not clear the flag non_blocking_wait_rq here, as
1455                 * the latter is used in bfq_activate_bfqq to signal
1456                 * that timestamps need to be back-shifted (and is
1457                 * cleared right after).
1458                 */
1459
1460                /*
1461                 * In next assignment we rely on that either
1462                 * entity->service or entity->budget are not updated
1463                 * on expiration if bfqq is empty (see
1464                 * __bfq_bfqq_recalc_budget). Thus both quantities
1465                 * remain unchanged after such an expiration, and the
1466                 * following statement therefore assigns to
1467                 * entity->budget the remaining budget on such an
1468                 * expiration.
1469                 */
1470                entity->budget = min_t(unsigned long,
1471                                       bfq_bfqq_budget_left(bfqq),
1472                                       bfqq->max_budget);
1473
1474                /*
1475                 * At this point, we have used entity->service to get
1476                 * the budget left (needed for updating
1477                 * entity->budget). Thus we finally can, and have to,
1478                 * reset entity->service. The latter must be reset
1479                 * because bfqq would otherwise be charged again for
1480                 * the service it has received during its previous
1481                 * service slot(s).
1482                 */
1483                entity->service = 0;
1484
1485                return true;
1486        }
1487
1488        /*
1489         * We can finally complete expiration, by setting service to 0.
1490         */
1491        entity->service = 0;
1492        entity->budget = max_t(unsigned long, bfqq->max_budget,
1493                               bfq_serv_to_charge(bfqq->next_rq, bfqq));
1494        bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1495        return wr_or_deserves_wr;
1496}
1497
1498/*
1499 * Return the farthest past time instant according to jiffies
1500 * macros.
1501 */
1502static unsigned long bfq_smallest_from_now(void)
1503{
1504        return jiffies - MAX_JIFFY_OFFSET;
1505}
1506
1507static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1508                                             struct bfq_queue *bfqq,
1509                                             unsigned int old_wr_coeff,
1510                                             bool wr_or_deserves_wr,
1511                                             bool interactive,
1512                                             bool in_burst,
1513                                             bool soft_rt)
1514{
1515        if (old_wr_coeff == 1 && wr_or_deserves_wr) {
1516                /* start a weight-raising period */
1517                if (interactive) {
1518                        bfqq->service_from_wr = 0;
1519                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1520                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1521                } else {
1522                        /*
1523                         * No interactive weight raising in progress
1524                         * here: assign minus infinity to
1525                         * wr_start_at_switch_to_srt, to make sure
1526                         * that, at the end of the soft-real-time
1527                         * weight raising periods that is starting
1528                         * now, no interactive weight-raising period
1529                         * may be wrongly considered as still in
1530                         * progress (and thus actually started by
1531                         * mistake).
1532                         */
1533                        bfqq->wr_start_at_switch_to_srt =
1534                                bfq_smallest_from_now();
1535                        bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1536                                BFQ_SOFTRT_WEIGHT_FACTOR;
1537                        bfqq->wr_cur_max_time =
1538                                bfqd->bfq_wr_rt_max_time;
1539                }
1540
1541                /*
1542                 * If needed, further reduce budget to make sure it is
1543                 * close to bfqq's backlog, so as to reduce the
1544                 * scheduling-error component due to a too large
1545                 * budget. Do not care about throughput consequences,
1546                 * but only about latency. Finally, do not assign a
1547                 * too small budget either, to avoid increasing
1548                 * latency by causing too frequent expirations.
1549                 */
1550                bfqq->entity.budget = min_t(unsigned long,
1551                                            bfqq->entity.budget,
1552                                            2 * bfq_min_budget(bfqd));
1553        } else if (old_wr_coeff > 1) {
1554                if (interactive) { /* update wr coeff and duration */
1555                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1556                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1557                } else if (in_burst)
1558                        bfqq->wr_coeff = 1;
1559                else if (soft_rt) {
1560                        /*
1561                         * The application is now or still meeting the
1562                         * requirements for being deemed soft rt.  We
1563                         * can then correctly and safely (re)charge
1564                         * the weight-raising duration for the
1565                         * application with the weight-raising
1566                         * duration for soft rt applications.
1567                         *
1568                         * In particular, doing this recharge now, i.e.,
1569                         * before the weight-raising period for the
1570                         * application finishes, reduces the probability
1571                         * of the following negative scenario:
1572                         * 1) the weight of a soft rt application is
1573                         *    raised at startup (as for any newly
1574                         *    created application),
1575                         * 2) since the application is not interactive,
1576                         *    at a certain time weight-raising is
1577                         *    stopped for the application,
1578                         * 3) at that time the application happens to
1579                         *    still have pending requests, and hence
1580                         *    is destined to not have a chance to be
1581                         *    deemed soft rt before these requests are
1582                         *    completed (see the comments to the
1583                         *    function bfq_bfqq_softrt_next_start()
1584                         *    for details on soft rt detection),
1585                         * 4) these pending requests experience a high
1586                         *    latency because the application is not
1587                         *    weight-raised while they are pending.
1588                         */
1589                        if (bfqq->wr_cur_max_time !=
1590                                bfqd->bfq_wr_rt_max_time) {
1591                                bfqq->wr_start_at_switch_to_srt =
1592                                        bfqq->last_wr_start_finish;
1593
1594                                bfqq->wr_cur_max_time =
1595                                        bfqd->bfq_wr_rt_max_time;
1596                                bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1597                                        BFQ_SOFTRT_WEIGHT_FACTOR;
1598                        }
1599                        bfqq->last_wr_start_finish = jiffies;
1600                }
1601        }
1602}
1603
1604static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1605                                        struct bfq_queue *bfqq)
1606{
1607        return bfqq->dispatched == 0 &&
1608                time_is_before_jiffies(
1609                        bfqq->budget_timeout +
1610                        bfqd->bfq_wr_min_idle_time);
1611}
1612
1613static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1614                                             struct bfq_queue *bfqq,
1615                                             int old_wr_coeff,
1616                                             struct request *rq,
1617                                             bool *interactive)
1618{
1619        bool soft_rt, in_burst, wr_or_deserves_wr,
1620                bfqq_wants_to_preempt,
1621                idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1622                /*
1623                 * See the comments on
1624                 * bfq_bfqq_update_budg_for_activation for
1625                 * details on the usage of the next variable.
1626                 */
1627                arrived_in_time =  ktime_get_ns() <=
1628                        bfqq->ttime.last_end_request +
1629                        bfqd->bfq_slice_idle * 3;
1630
1631
1632        /*
1633         * bfqq deserves to be weight-raised if:
1634         * - it is sync,
1635         * - it does not belong to a large burst,
1636         * - it has been idle for enough time or is soft real-time,
1637         * - is linked to a bfq_io_cq (it is not shared in any sense).
1638         */
1639        in_burst = bfq_bfqq_in_large_burst(bfqq);
1640        soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
1641                !BFQQ_TOTALLY_SEEKY(bfqq) &&
1642                !in_burst &&
1643                time_is_before_jiffies(bfqq->soft_rt_next_start) &&
1644                bfqq->dispatched == 0;
1645        *interactive = !in_burst && idle_for_long_time;
1646        wr_or_deserves_wr = bfqd->low_latency &&
1647                (bfqq->wr_coeff > 1 ||
1648                 (bfq_bfqq_sync(bfqq) &&
1649                  bfqq->bic && (*interactive || soft_rt)));
1650
1651        /*
1652         * Using the last flag, update budget and check whether bfqq
1653         * may want to preempt the in-service queue.
1654         */
1655        bfqq_wants_to_preempt =
1656                bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1657                                                    arrived_in_time,
1658                                                    wr_or_deserves_wr);
1659
1660        /*
1661         * If bfqq happened to be activated in a burst, but has been
1662         * idle for much more than an interactive queue, then we
1663         * assume that, in the overall I/O initiated in the burst, the
1664         * I/O associated with bfqq is finished. So bfqq does not need
1665         * to be treated as a queue belonging to a burst
1666         * anymore. Accordingly, we reset bfqq's in_large_burst flag
1667         * if set, and remove bfqq from the burst list if it's
1668         * there. We do not decrement burst_size, because the fact
1669         * that bfqq does not need to belong to the burst list any
1670         * more does not invalidate the fact that bfqq was created in
1671         * a burst.
1672         */
1673        if (likely(!bfq_bfqq_just_created(bfqq)) &&
1674            idle_for_long_time &&
1675            time_is_before_jiffies(
1676                    bfqq->budget_timeout +
1677                    msecs_to_jiffies(10000))) {
1678                hlist_del_init(&bfqq->burst_list_node);
1679                bfq_clear_bfqq_in_large_burst(bfqq);
1680        }
1681
1682        bfq_clear_bfqq_just_created(bfqq);
1683
1684
1685        if (!bfq_bfqq_IO_bound(bfqq)) {
1686                if (arrived_in_time) {
1687                        bfqq->requests_within_timer++;
1688                        if (bfqq->requests_within_timer >=
1689                            bfqd->bfq_requests_within_timer)
1690                                bfq_mark_bfqq_IO_bound(bfqq);
1691                } else
1692                        bfqq->requests_within_timer = 0;
1693        }
1694
1695        if (bfqd->low_latency) {
1696                if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1697                        /* wraparound */
1698                        bfqq->split_time =
1699                                jiffies - bfqd->bfq_wr_min_idle_time - 1;
1700
1701                if (time_is_before_jiffies(bfqq->split_time +
1702                                           bfqd->bfq_wr_min_idle_time)) {
1703                        bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1704                                                         old_wr_coeff,
1705                                                         wr_or_deserves_wr,
1706                                                         *interactive,
1707                                                         in_burst,
1708                                                         soft_rt);
1709
1710                        if (old_wr_coeff != bfqq->wr_coeff)
1711                                bfqq->entity.prio_changed = 1;
1712                }
1713        }
1714
1715        bfqq->last_idle_bklogged = jiffies;
1716        bfqq->service_from_backlogged = 0;
1717        bfq_clear_bfqq_softrt_update(bfqq);
1718
1719        bfq_add_bfqq_busy(bfqd, bfqq);
1720
1721        /*
1722         * Expire in-service queue only if preemption may be needed
1723         * for guarantees. In this respect, the function
1724         * next_queue_may_preempt just checks a simple, necessary
1725         * condition, and not a sufficient condition based on
1726         * timestamps. In fact, for the latter condition to be
1727         * evaluated, timestamps would need first to be updated, and
1728         * this operation is quite costly (see the comments on the
1729         * function bfq_bfqq_update_budg_for_activation).
1730         */
1731        if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
1732            bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
1733            next_queue_may_preempt(bfqd))
1734                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1735                                false, BFQQE_PREEMPTED);
1736}
1737
1738static void bfq_add_request(struct request *rq)
1739{
1740        struct bfq_queue *bfqq = RQ_BFQQ(rq);
1741        struct bfq_data *bfqd = bfqq->bfqd;
1742        struct request *next_rq, *prev;
1743        unsigned int old_wr_coeff = bfqq->wr_coeff;
1744        bool interactive = false;
1745
1746        bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1747        bfqq->queued[rq_is_sync(rq)]++;
1748        bfqd->queued++;
1749
1750        if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
1751                /*
1752                 * Periodically reset inject limit, to make sure that
1753                 * the latter eventually drops in case workload
1754                 * changes, see step (3) in the comments on
1755                 * bfq_update_inject_limit().
1756                 */
1757                if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
1758                                             msecs_to_jiffies(1000))) {
1759                        /* invalidate baseline total service time */
1760                        bfqq->last_serv_time_ns = 0;
1761
1762                        /*
1763                         * Reset pointer in case we are waiting for
1764                         * some request completion.
1765                         */
1766                        bfqd->waited_rq = NULL;
1767
1768                        /*
1769                         * If bfqq has a short think time, then start
1770                         * by setting the inject limit to 0
1771                         * prudentially, because the service time of
1772                         * an injected I/O request may be higher than
1773                         * the think time of bfqq, and therefore, if
1774                         * one request was injected when bfqq remains
1775                         * empty, this injected request might delay
1776                         * the service of the next I/O request for
1777                         * bfqq significantly. In case bfqq can
1778                         * actually tolerate some injection, then the
1779                         * adaptive update will however raise the
1780                         * limit soon. This lucky circumstance holds
1781                         * exactly because bfqq has a short think
1782                         * time, and thus, after remaining empty, is
1783                         * likely to get new I/O enqueued---and then
1784                         * completed---before being expired. This is
1785                         * the very pattern that gives the
1786                         * limit-update algorithm the chance to
1787                         * measure the effect of injection on request
1788                         * service times, and then to update the limit
1789                         * accordingly.
1790                         *
1791                         * On the opposite end, if bfqq has a long
1792                         * think time, then start directly by 1,
1793                         * because:
1794                         * a) on the bright side, keeping at most one
1795                         * request in service in the drive is unlikely
1796                         * to cause any harm to the latency of bfqq's
1797                         * requests, as the service time of a single
1798                         * request is likely to be lower than the
1799                         * think time of bfqq;
1800                         * b) on the downside, after becoming empty,
1801                         * bfqq is likely to expire before getting its
1802                         * next request. With this request arrival
1803                         * pattern, it is very hard to sample total
1804                         * service times and update the inject limit
1805                         * accordingly (see comments on
1806                         * bfq_update_inject_limit()). So the limit is
1807                         * likely to be never, or at least seldom,
1808                         * updated.  As a consequence, by setting the
1809                         * limit to 1, we avoid that no injection ever
1810                         * occurs with bfqq. On the downside, this
1811                         * proactive step further reduces chances to
1812                         * actually compute the baseline total service
1813                         * time. Thus it reduces chances to execute the
1814                         * limit-update algorithm and possibly raise the
1815                         * limit to more than 1.
1816                         */
1817                        if (bfq_bfqq_has_short_ttime(bfqq))
1818                                bfqq->inject_limit = 0;
1819                        else
1820                                bfqq->inject_limit = 1;
1821                        bfqq->decrease_time_jif = jiffies;
1822                }
1823
1824                /*
1825                 * The following conditions must hold to setup a new
1826                 * sampling of total service time, and then a new
1827                 * update of the inject limit:
1828                 * - bfqq is in service, because the total service
1829                 *   time is evaluated only for the I/O requests of
1830                 *   the queues in service;
1831                 * - this is the right occasion to compute or to
1832                 *   lower the baseline total service time, because
1833                 *   there are actually no requests in the drive,
1834                 *   or
1835                 *   the baseline total service time is available, and
1836                 *   this is the right occasion to compute the other
1837                 *   quantity needed to update the inject limit, i.e.,
1838                 *   the total service time caused by the amount of
1839                 *   injection allowed by the current value of the
1840                 *   limit. It is the right occasion because injection
1841                 *   has actually been performed during the service
1842                 *   hole, and there are still in-flight requests,
1843                 *   which are very likely to be exactly the injected
1844                 *   requests, or part of them;
1845                 * - the minimum interval for sampling the total
1846                 *   service time and updating the inject limit has
1847                 *   elapsed.
1848                 */
1849                if (bfqq == bfqd->in_service_queue &&
1850                    (bfqd->rq_in_driver == 0 ||
1851                     (bfqq->last_serv_time_ns > 0 &&
1852                      bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
1853                    time_is_before_eq_jiffies(bfqq->decrease_time_jif +
1854                                              msecs_to_jiffies(100))) {
1855                        bfqd->last_empty_occupied_ns = ktime_get_ns();
1856                        /*
1857                         * Start the state machine for measuring the
1858                         * total service time of rq: setting
1859                         * wait_dispatch will cause bfqd->waited_rq to
1860                         * be set when rq will be dispatched.
1861                         */
1862                        bfqd->wait_dispatch = true;
1863                        bfqd->rqs_injected = false;
1864                }
1865        }
1866
1867        elv_rb_add(&bfqq->sort_list, rq);
1868
1869        /*
1870         * Check if this request is a better next-serve candidate.
1871         */
1872        prev = bfqq->next_rq;
1873        next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1874        bfqq->next_rq = next_rq;
1875
1876        /*
1877         * Adjust priority tree position, if next_rq changes.
1878         * See comments on bfq_pos_tree_add_move() for the unlikely().
1879         */
1880        if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
1881                bfq_pos_tree_add_move(bfqd, bfqq);
1882
1883        if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
1884                bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1885                                                 rq, &interactive);
1886        else {
1887                if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1888                    time_is_before_jiffies(
1889                                bfqq->last_wr_start_finish +
1890                                bfqd->bfq_wr_min_inter_arr_async)) {
1891                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1892                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1893
1894                        bfqd->wr_busy_queues++;
1895                        bfqq->entity.prio_changed = 1;
1896                }
1897                if (prev != bfqq->next_rq)
1898                        bfq_updated_next_req(bfqd, bfqq);
1899        }
1900
1901        /*
1902         * Assign jiffies to last_wr_start_finish in the following
1903         * cases:
1904         *
1905         * . if bfqq is not going to be weight-raised, because, for
1906         *   non weight-raised queues, last_wr_start_finish stores the
1907         *   arrival time of the last request; as of now, this piece
1908         *   of information is used only for deciding whether to
1909         *   weight-raise async queues
1910         *
1911         * . if bfqq is not weight-raised, because, if bfqq is now
1912         *   switching to weight-raised, then last_wr_start_finish
1913         *   stores the time when weight-raising starts
1914         *
1915         * . if bfqq is interactive, because, regardless of whether
1916         *   bfqq is currently weight-raised, the weight-raising
1917         *   period must start or restart (this case is considered
1918         *   separately because it is not detected by the above
1919         *   conditions, if bfqq is already weight-raised)
1920         *
1921         * last_wr_start_finish has to be updated also if bfqq is soft
1922         * real-time, because the weight-raising period is constantly
1923         * restarted on idle-to-busy transitions for these queues, but
1924         * this is already done in bfq_bfqq_handle_idle_busy_switch if
1925         * needed.
1926         */
1927        if (bfqd->low_latency &&
1928                (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1929                bfqq->last_wr_start_finish = jiffies;
1930}
1931
1932static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1933                                          struct bio *bio,
1934                                          struct request_queue *q)
1935{
1936        struct bfq_queue *bfqq = bfqd->bio_bfqq;
1937
1938
1939        if (bfqq)
1940                return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1941
1942        return NULL;
1943}
1944
1945static sector_t get_sdist(sector_t last_pos, struct request *rq)
1946{
1947        if (last_pos)
1948                return abs(blk_rq_pos(rq) - last_pos);
1949
1950        return 0;
1951}
1952
1953#if 0 /* Still not clear if we can do without next two functions */
1954static void bfq_activate_request(struct request_queue *q, struct request *rq)
1955{
1956        struct bfq_data *bfqd = q->elevator->elevator_data;
1957
1958        bfqd->rq_in_driver++;
1959}
1960
1961static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1962{
1963        struct bfq_data *bfqd = q->elevator->elevator_data;
1964
1965        bfqd->rq_in_driver--;
1966}
1967#endif
1968
1969static void bfq_remove_request(struct request_queue *q,
1970                               struct request *rq)
1971{
1972        struct bfq_queue *bfqq = RQ_BFQQ(rq);
1973        struct bfq_data *bfqd = bfqq->bfqd;
1974        const int sync = rq_is_sync(rq);
1975
1976        if (bfqq->next_rq == rq) {
1977                bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1978                bfq_updated_next_req(bfqd, bfqq);
1979        }
1980
1981        if (rq->queuelist.prev != &rq->queuelist)
1982                list_del_init(&rq->queuelist);
1983        bfqq->queued[sync]--;
1984        bfqd->queued--;
1985        elv_rb_del(&bfqq->sort_list, rq);
1986
1987        elv_rqhash_del(q, rq);
1988        if (q->last_merge == rq)
1989                q->last_merge = NULL;
1990
1991        if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1992                bfqq->next_rq = NULL;
1993
1994                if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
1995                        bfq_del_bfqq_busy(bfqd, bfqq, false);
1996                        /*
1997                         * bfqq emptied. In normal operation, when
1998                         * bfqq is empty, bfqq->entity.service and
1999                         * bfqq->entity.budget must contain,
2000                         * respectively, the service received and the
2001                         * budget used last time bfqq emptied. These
2002                         * facts do not hold in this case, as at least
2003                         * this last removal occurred while bfqq is
2004                         * not in service. To avoid inconsistencies,
2005                         * reset both bfqq->entity.service and
2006                         * bfqq->entity.budget, if bfqq has still a
2007                         * process that may issue I/O requests to it.
2008                         */
2009                        bfqq->entity.budget = bfqq->entity.service = 0;
2010                }
2011
2012                /*
2013                 * Remove queue from request-position tree as it is empty.
2014                 */
2015                if (bfqq->pos_root) {
2016                        rb_erase(&bfqq->pos_node, bfqq->pos_root);
2017                        bfqq->pos_root = NULL;
2018                }
2019        } else {
2020                /* see comments on bfq_pos_tree_add_move() for the unlikely() */
2021                if (unlikely(!bfqd->nonrot_with_queueing))
2022                        bfq_pos_tree_add_move(bfqd, bfqq);
2023        }
2024
2025        if (rq->cmd_flags & REQ_META)
2026                bfqq->meta_pending--;
2027
2028}
2029
2030static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
2031{
2032        struct request_queue *q = hctx->queue;
2033        struct bfq_data *bfqd = q->elevator->elevator_data;
2034        struct request *free = NULL;
2035        /*
2036         * bfq_bic_lookup grabs the queue_lock: invoke it now and
2037         * store its return value for later use, to avoid nesting
2038         * queue_lock inside the bfqd->lock. We assume that the bic
2039         * returned by bfq_bic_lookup does not go away before
2040         * bfqd->lock is taken.
2041         */
2042        struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
2043        bool ret;
2044
2045        spin_lock_irq(&bfqd->lock);
2046
2047        if (bic)
2048                bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
2049        else
2050                bfqd->bio_bfqq = NULL;
2051        bfqd->bio_bic = bic;
2052
2053        ret = blk_mq_sched_try_merge(q, bio, &free);
2054
2055        if (free)
2056                blk_mq_free_request(free);
2057        spin_unlock_irq(&bfqd->lock);
2058
2059        return ret;
2060}
2061
2062static int bfq_request_merge(struct request_queue *q, struct request **req,
2063                             struct bio *bio)
2064{
2065        struct bfq_data *bfqd = q->elevator->elevator_data;
2066        struct request *__rq;
2067
2068        __rq = bfq_find_rq_fmerge(bfqd, bio, q);
2069        if (__rq && elv_bio_merge_ok(__rq, bio)) {
2070                *req = __rq;
2071                return ELEVATOR_FRONT_MERGE;
2072        }
2073
2074        return ELEVATOR_NO_MERGE;
2075}
2076
2077static struct bfq_queue *bfq_init_rq(struct request *rq);
2078
2079static void bfq_request_merged(struct request_queue *q, struct request *req,
2080                               enum elv_merge type)
2081{
2082        if (type == ELEVATOR_FRONT_MERGE &&
2083            rb_prev(&req->rb_node) &&
2084            blk_rq_pos(req) <
2085            blk_rq_pos(container_of(rb_prev(&req->rb_node),
2086                                    struct request, rb_node))) {
2087                struct bfq_queue *bfqq = bfq_init_rq(req);
2088                struct bfq_data *bfqd = bfqq->bfqd;
2089                struct request *prev, *next_rq;
2090
2091                /* Reposition request in its sort_list */
2092                elv_rb_del(&bfqq->sort_list, req);
2093                elv_rb_add(&bfqq->sort_list, req);
2094
2095                /* Choose next request to be served for bfqq */
2096                prev = bfqq->next_rq;
2097                next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
2098                                         bfqd->last_position);
2099                bfqq->next_rq = next_rq;
2100                /*
2101                 * If next_rq changes, update both the queue's budget to
2102                 * fit the new request and the queue's position in its
2103                 * rq_pos_tree.
2104                 */
2105                if (prev != bfqq->next_rq) {
2106                        bfq_updated_next_req(bfqd, bfqq);
2107                        /*
2108                         * See comments on bfq_pos_tree_add_move() for
2109                         * the unlikely().
2110                         */
2111                        if (unlikely(!bfqd->nonrot_with_queueing))
2112                                bfq_pos_tree_add_move(bfqd, bfqq);
2113                }
2114        }
2115}
2116
2117/*
2118 * This function is called to notify the scheduler that the requests
2119 * rq and 'next' have been merged, with 'next' going away.  BFQ
2120 * exploits this hook to address the following issue: if 'next' has a
2121 * fifo_time lower that rq, then the fifo_time of rq must be set to
2122 * the value of 'next', to not forget the greater age of 'next'.
2123 *
2124 * NOTE: in this function we assume that rq is in a bfq_queue, basing
2125 * on that rq is picked from the hash table q->elevator->hash, which,
2126 * in its turn, is filled only with I/O requests present in
2127 * bfq_queues, while BFQ is in use for the request queue q. In fact,
2128 * the function that fills this hash table (elv_rqhash_add) is called
2129 * only by bfq_insert_request.
2130 */
2131static void bfq_requests_merged(struct request_queue *q, struct request *rq,
2132                                struct request *next)
2133{
2134        struct bfq_queue *bfqq = bfq_init_rq(rq),
2135                *next_bfqq = bfq_init_rq(next);
2136
2137        /*
2138         * If next and rq belong to the same bfq_queue and next is older
2139         * than rq, then reposition rq in the fifo (by substituting next
2140         * with rq). Otherwise, if next and rq belong to different
2141         * bfq_queues, never reposition rq: in fact, we would have to
2142         * reposition it with respect to next's position in its own fifo,
2143         * which would most certainly be too expensive with respect to
2144         * the benefits.
2145         */
2146        if (bfqq == next_bfqq &&
2147            !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2148            next->fifo_time < rq->fifo_time) {
2149                list_del_init(&rq->queuelist);
2150                list_replace_init(&next->queuelist, &rq->queuelist);
2151                rq->fifo_time = next->fifo_time;
2152        }
2153
2154        if (bfqq->next_rq == next)
2155                bfqq->next_rq = rq;
2156
2157        bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
2158}
2159
2160/* Must be called with bfqq != NULL */
2161static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
2162{
2163        if (bfq_bfqq_busy(bfqq))
2164                bfqq->bfqd->wr_busy_queues--;
2165        bfqq->wr_coeff = 1;
2166        bfqq->wr_cur_max_time = 0;
2167        bfqq->last_wr_start_finish = jiffies;
2168        /*
2169         * Trigger a weight change on the next invocation of
2170         * __bfq_entity_update_weight_prio.
2171         */
2172        bfqq->entity.prio_changed = 1;
2173}
2174
2175void bfq_end_wr_async_queues(struct bfq_data *bfqd,
2176                             struct bfq_group *bfqg)
2177{
2178        int i, j;
2179
2180        for (i = 0; i < 2; i++)
2181                for (j = 0; j < IOPRIO_BE_NR; j++)
2182                        if (bfqg->async_bfqq[i][j])
2183                                bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
2184        if (bfqg->async_idle_bfqq)
2185                bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
2186}
2187
2188static void bfq_end_wr(struct bfq_data *bfqd)
2189{
2190        struct bfq_queue *bfqq;
2191
2192        spin_lock_irq(&bfqd->lock);
2193
2194        list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
2195                bfq_bfqq_end_wr(bfqq);
2196        list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
2197                bfq_bfqq_end_wr(bfqq);
2198        bfq_end_wr_async(bfqd);
2199
2200        spin_unlock_irq(&bfqd->lock);
2201}
2202
2203static sector_t bfq_io_struct_pos(void *io_struct, bool request)
2204{
2205        if (request)
2206                return blk_rq_pos(io_struct);
2207        else
2208                return ((struct bio *)io_struct)->bi_iter.bi_sector;
2209}
2210
2211static int bfq_rq_close_to_sector(void *io_struct, bool request,
2212                                  sector_t sector)
2213{
2214        return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
2215               BFQQ_CLOSE_THR;
2216}
2217
2218static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
2219                                         struct bfq_queue *bfqq,
2220                                         sector_t sector)
2221{
2222        struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
2223        struct rb_node *parent, *node;
2224        struct bfq_queue *__bfqq;
2225
2226        if (RB_EMPTY_ROOT(root))
2227                return NULL;
2228
2229        /*
2230         * First, if we find a request starting at the end of the last
2231         * request, choose it.
2232         */
2233        __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
2234        if (__bfqq)
2235                return __bfqq;
2236
2237        /*
2238         * If the exact sector wasn't found, the parent of the NULL leaf
2239         * will contain the closest sector (rq_pos_tree sorted by
2240         * next_request position).
2241         */
2242        __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
2243        if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
2244                return __bfqq;
2245
2246        if (blk_rq_pos(__bfqq->next_rq) < sector)
2247                node = rb_next(&__bfqq->pos_node);
2248        else
2249                node = rb_prev(&__bfqq->pos_node);
2250        if (!node)
2251                return NULL;
2252
2253        __bfqq = rb_entry(node, struct bfq_queue, pos_node);
2254        if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
2255                return __bfqq;
2256
2257        return NULL;
2258}
2259
2260static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
2261                                                   struct bfq_queue *cur_bfqq,
2262                                                   sector_t sector)
2263{
2264        struct bfq_queue *bfqq;
2265
2266        /*
2267         * We shall notice if some of the queues are cooperating,
2268         * e.g., working closely on the same area of the device. In
2269         * that case, we can group them together and: 1) don't waste
2270         * time idling, and 2) serve the union of their requests in
2271         * the best possible order for throughput.
2272         */
2273        bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2274        if (!bfqq || bfqq == cur_bfqq)
2275                return NULL;
2276
2277        return bfqq;
2278}
2279
2280static struct bfq_queue *
2281bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2282{
2283        int process_refs, new_process_refs;
2284        struct bfq_queue *__bfqq;
2285
2286        /*
2287         * If there are no process references on the new_bfqq, then it is
2288         * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
2289         * may have dropped their last reference (not just their last process
2290         * reference).
2291         */
2292        if (!bfqq_process_refs(new_bfqq))
2293                return NULL;
2294
2295        /* Avoid a circular list and skip interim queue merges. */
2296        while ((__bfqq = new_bfqq->new_bfqq)) {
2297                if (__bfqq == bfqq)
2298                        return NULL;
2299                new_bfqq = __bfqq;
2300        }
2301
2302        process_refs = bfqq_process_refs(bfqq);
2303        new_process_refs = bfqq_process_refs(new_bfqq);
2304        /*
2305         * If the process for the bfqq has gone away, there is no
2306         * sense in merging the queues.
2307         */
2308        if (process_refs == 0 || new_process_refs == 0)
2309                return NULL;
2310
2311        bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
2312                new_bfqq->pid);
2313
2314        /*
2315         * Merging is just a redirection: the requests of the process
2316         * owning one of the two queues are redirected to the other queue.
2317         * The latter queue, in its turn, is set as shared if this is the
2318         * first time that the requests of some process are redirected to
2319         * it.
2320         *
2321         * We redirect bfqq to new_bfqq and not the opposite, because
2322         * we are in the context of the process owning bfqq, thus we
2323         * have the io_cq of this process. So we can immediately
2324         * configure this io_cq to redirect the requests of the
2325         * process to new_bfqq. In contrast, the io_cq of new_bfqq is
2326         * not available any more (new_bfqq->bic == NULL).
2327         *
2328         * Anyway, even in case new_bfqq coincides with the in-service
2329         * queue, redirecting requests the in-service queue is the
2330         * best option, as we feed the in-service queue with new
2331         * requests close to the last request served and, by doing so,
2332         * are likely to increase the throughput.
2333         */
2334        bfqq->new_bfqq = new_bfqq;
2335        new_bfqq->ref += process_refs;
2336        return new_bfqq;
2337}
2338
2339static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
2340                                        struct bfq_queue *new_bfqq)
2341{
2342        if (bfq_too_late_for_merging(new_bfqq))
2343                return false;
2344
2345        if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
2346            (bfqq->ioprio_class != new_bfqq->ioprio_class))
2347                return false;
2348
2349        /*
2350         * If either of the queues has already been detected as seeky,
2351         * then merging it with the other queue is unlikely to lead to
2352         * sequential I/O.
2353         */
2354        if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
2355                return false;
2356
2357        /*
2358         * Interleaved I/O is known to be done by (some) applications
2359         * only for reads, so it does not make sense to merge async
2360         * queues.
2361         */
2362        if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
2363                return false;
2364
2365        return true;
2366}
2367
2368/*
2369 * Attempt to schedule a merge of bfqq with the currently in-service
2370 * queue or with a close queue among the scheduled queues.  Return
2371 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
2372 * structure otherwise.
2373 *
2374 * The OOM queue is not allowed to participate to cooperation: in fact, since
2375 * the requests temporarily redirected to the OOM queue could be redirected
2376 * again to dedicated queues at any time, the state needed to correctly
2377 * handle merging with the OOM queue would be quite complex and expensive
2378 * to maintain. Besides, in such a critical condition as an out of memory,
2379 * the benefits of queue merging may be little relevant, or even negligible.
2380 *
2381 * WARNING: queue merging may impair fairness among non-weight raised
2382 * queues, for at least two reasons: 1) the original weight of a
2383 * merged queue may change during the merged state, 2) even being the
2384 * weight the same, a merged queue may be bloated with many more
2385 * requests than the ones produced by its originally-associated
2386 * process.
2387 */
2388static struct bfq_queue *
2389bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2390                     void *io_struct, bool request)
2391{
2392        struct bfq_queue *in_service_bfqq, *new_bfqq;
2393
2394        /*
2395         * Do not perform queue merging if the device is non
2396         * rotational and performs internal queueing. In fact, such a
2397         * device reaches a high speed through internal parallelism
2398         * and pipelining. This means that, to reach a high
2399         * throughput, it must have many requests enqueued at the same
2400         * time. But, in this configuration, the internal scheduling
2401         * algorithm of the device does exactly the job of queue
2402         * merging: it reorders requests so as to obtain as much as
2403         * possible a sequential I/O pattern. As a consequence, with
2404         * the workload generated by processes doing interleaved I/O,
2405         * the throughput reached by the device is likely to be the
2406         * same, with and without queue merging.
2407         *
2408         * Disabling merging also provides a remarkable benefit in
2409         * terms of throughput. Merging tends to make many workloads
2410         * artificially more uneven, because of shared queues
2411         * remaining non empty for incomparably more time than
2412         * non-merged queues. This may accentuate workload
2413         * asymmetries. For example, if one of the queues in a set of
2414         * merged queues has a higher weight than a normal queue, then
2415         * the shared queue may inherit such a high weight and, by
2416         * staying almost always active, may force BFQ to perform I/O
2417         * plugging most of the time. This evidently makes it harder
2418         * for BFQ to let the device reach a high throughput.
2419         *
2420         * Finally, the likely() macro below is not used because one
2421         * of the two branches is more likely than the other, but to
2422         * have the code path after the following if() executed as
2423         * fast as possible for the case of a non rotational device
2424         * with queueing. We want it because this is the fastest kind
2425         * of device. On the opposite end, the likely() may lengthen
2426         * the execution time of BFQ for the case of slower devices
2427         * (rotational or at least without queueing). But in this case
2428         * the execution time of BFQ matters very little, if not at
2429         * all.
2430         */
2431        if (likely(bfqd->nonrot_with_queueing))
2432                return NULL;
2433
2434        /*
2435         * Prevent bfqq from being merged if it has been created too
2436         * long ago. The idea is that true cooperating processes, and
2437         * thus their associated bfq_queues, are supposed to be
2438         * created shortly after each other. This is the case, e.g.,
2439         * for KVM/QEMU and dump I/O threads. Basing on this
2440         * assumption, the following filtering greatly reduces the
2441         * probability that two non-cooperating processes, which just
2442         * happen to do close I/O for some short time interval, have
2443         * their queues merged by mistake.
2444         */
2445        if (bfq_too_late_for_merging(bfqq))
2446                return NULL;
2447
2448        if (bfqq->new_bfqq)
2449                return bfqq->new_bfqq;
2450
2451        if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
2452                return NULL;
2453
2454        /* If there is only one backlogged queue, don't search. */
2455        if (bfq_tot_busy_queues(bfqd) == 1)
2456                return NULL;
2457
2458        in_service_bfqq = bfqd->in_service_queue;
2459
2460        if (in_service_bfqq && in_service_bfqq != bfqq &&
2461            likely(in_service_bfqq != &bfqd->oom_bfqq) &&
2462            bfq_rq_close_to_sector(io_struct, request,
2463                                   bfqd->in_serv_last_pos) &&
2464            bfqq->entity.parent == in_service_bfqq->entity.parent &&
2465            bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2466                new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2467                if (new_bfqq)
2468                        return new_bfqq;
2469        }
2470        /*
2471         * Check whether there is a cooperator among currently scheduled
2472         * queues. The only thing we need is that the bio/request is not
2473         * NULL, as we need it to establish whether a cooperator exists.
2474         */
2475        new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2476                        bfq_io_struct_pos(io_struct, request));
2477
2478        if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
2479            bfq_may_be_close_cooperator(bfqq, new_bfqq))
2480                return bfq_setup_merge(bfqq, new_bfqq);
2481
2482        return NULL;
2483}
2484
2485static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2486{
2487        struct bfq_io_cq *bic = bfqq->bic;
2488
2489        /*
2490         * If !bfqq->bic, the queue is already shared or its requests
2491         * have already been redirected to a shared queue; both idle window
2492         * and weight raising state have already been saved. Do nothing.
2493         */
2494        if (!bic)
2495                return;
2496
2497        bic->saved_weight = bfqq->entity.orig_weight;
2498        bic->saved_ttime = bfqq->ttime;
2499        bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
2500        bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2501        bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2502        bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2503        if (unlikely(bfq_bfqq_just_created(bfqq) &&
2504                     !bfq_bfqq_in_large_burst(bfqq) &&
2505                     bfqq->bfqd->low_latency)) {
2506                /*
2507                 * bfqq being merged right after being created: bfqq
2508                 * would have deserved interactive weight raising, but
2509                 * did not make it to be set in a weight-raised state,
2510                 * because of this early merge. Store directly the
2511                 * weight-raising state that would have been assigned
2512                 * to bfqq, so that to avoid that bfqq unjustly fails
2513                 * to enjoy weight raising if split soon.
2514                 */
2515                bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
2516                bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
2517                bic->saved_last_wr_start_finish = jiffies;
2518        } else {
2519                bic->saved_wr_coeff = bfqq->wr_coeff;
2520                bic->saved_wr_start_at_switch_to_srt =
2521                        bfqq->wr_start_at_switch_to_srt;
2522                bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2523                bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2524        }
2525}
2526
2527static void
2528bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2529                struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2530{
2531        bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2532                (unsigned long)new_bfqq->pid);
2533        /* Save weight raising and idle window of the merged queues */
2534        bfq_bfqq_save_state(bfqq);
2535        bfq_bfqq_save_state(new_bfqq);
2536        if (bfq_bfqq_IO_bound(bfqq))
2537                bfq_mark_bfqq_IO_bound(new_bfqq);
2538        bfq_clear_bfqq_IO_bound(bfqq);
2539
2540        /*
2541         * If bfqq is weight-raised, then let new_bfqq inherit
2542         * weight-raising. To reduce false positives, neglect the case
2543         * where bfqq has just been created, but has not yet made it
2544         * to be weight-raised (which may happen because EQM may merge
2545         * bfqq even before bfq_add_request is executed for the first
2546         * time for bfqq). Handling this case would however be very
2547         * easy, thanks to the flag just_created.
2548         */
2549        if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2550                new_bfqq->wr_coeff = bfqq->wr_coeff;
2551                new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2552                new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2553                new_bfqq->wr_start_at_switch_to_srt =
2554                        bfqq->wr_start_at_switch_to_srt;
2555                if (bfq_bfqq_busy(new_bfqq))
2556                        bfqd->wr_busy_queues++;
2557                new_bfqq->entity.prio_changed = 1;
2558        }
2559
2560        if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2561                bfqq->wr_coeff = 1;
2562                bfqq->entity.prio_changed = 1;
2563                if (bfq_bfqq_busy(bfqq))
2564                        bfqd->wr_busy_queues--;
2565        }
2566
2567        bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2568                     bfqd->wr_busy_queues);
2569
2570        /*
2571         * Merge queues (that is, let bic redirect its requests to new_bfqq)
2572         */
2573        bic_set_bfqq(bic, new_bfqq, 1);
2574        bfq_mark_bfqq_coop(new_bfqq);
2575        /*
2576         * new_bfqq now belongs to at least two bics (it is a shared queue):
2577         * set new_bfqq->bic to NULL. bfqq either:
2578         * - does not belong to any bic any more, and hence bfqq->bic must
2579         *   be set to NULL, or
2580         * - is a queue whose owning bics have already been redirected to a
2581         *   different queue, hence the queue is destined to not belong to
2582         *   any bic soon and bfqq->bic is already NULL (therefore the next
2583         *   assignment causes no harm).
2584         */
2585        new_bfqq->bic = NULL;
2586        /*
2587         * If the queue is shared, the pid is the pid of one of the associated
2588         * processes. Which pid depends on the exact sequence of merge events
2589         * the queue underwent. So printing such a pid is useless and confusing
2590         * because it reports a random pid between those of the associated
2591         * processes.
2592         * We mark such a queue with a pid -1, and then print SHARED instead of
2593         * a pid in logging messages.
2594         */
2595        new_bfqq->pid = -1;
2596        bfqq->bic = NULL;
2597        /* release process reference to bfqq */
2598        bfq_put_queue(bfqq);
2599}
2600
2601static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2602                                struct bio *bio)
2603{
2604        struct bfq_data *bfqd = q->elevator->elevator_data;
2605        bool is_sync = op_is_sync(bio->bi_opf);
2606        struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2607
2608        /*
2609         * Disallow merge of a sync bio into an async request.
2610         */
2611        if (is_sync && !rq_is_sync(rq))
2612                return false;
2613
2614        /*
2615         * Lookup the bfqq that this bio will be queued with. Allow
2616         * merge only if rq is queued there.
2617         */
2618        if (!bfqq)
2619                return false;
2620
2621        /*
2622         * We take advantage of this function to perform an early merge
2623         * of the queues of possible cooperating processes.
2624         */
2625        new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2626        if (new_bfqq) {
2627                /*
2628                 * bic still points to bfqq, then it has not yet been
2629                 * redirected to some other bfq_queue, and a queue
2630                 * merge between bfqq and new_bfqq can be safely
2631                 * fulfilled, i.e., bic can be redirected to new_bfqq
2632                 * and bfqq can be put.
2633                 */
2634                bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2635                                new_bfqq);
2636                /*
2637                 * If we get here, bio will be queued into new_queue,
2638                 * so use new_bfqq to decide whether bio and rq can be
2639                 * merged.
2640                 */
2641                bfqq = new_bfqq;
2642
2643                /*
2644                 * Change also bqfd->bio_bfqq, as
2645                 * bfqd->bio_bic now points to new_bfqq, and
2646                 * this function may be invoked again (and then may
2647                 * use again bqfd->bio_bfqq).
2648                 */
2649                bfqd->bio_bfqq = bfqq;
2650        }
2651
2652        return bfqq == RQ_BFQQ(rq);
2653}
2654
2655/*
2656 * Set the maximum time for the in-service queue to consume its
2657 * budget. This prevents seeky processes from lowering the throughput.
2658 * In practice, a time-slice service scheme is used with seeky
2659 * processes.
2660 */
2661static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2662                                   struct bfq_queue *bfqq)
2663{
2664        unsigned int timeout_coeff;
2665
2666        if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2667                timeout_coeff = 1;
2668        else
2669                timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2670
2671        bfqd->last_budget_start = ktime_get();
2672
2673        bfqq->budget_timeout = jiffies +
2674                bfqd->bfq_timeout * timeout_coeff;
2675}
2676
2677static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2678                                       struct bfq_queue *bfqq)
2679{
2680        if (bfqq) {
2681                bfq_clear_bfqq_fifo_expire(bfqq);
2682
2683                bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2684
2685                if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2686                    bfqq->wr_coeff > 1 &&
2687                    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2688                    time_is_before_jiffies(bfqq->budget_timeout)) {
2689                        /*
2690                         * For soft real-time queues, move the start
2691                         * of the weight-raising period forward by the
2692                         * time the queue has not received any
2693                         * service. Otherwise, a relatively long
2694                         * service delay is likely to cause the
2695                         * weight-raising period of the queue to end,
2696                         * because of the short duration of the
2697                         * weight-raising period of a soft real-time
2698                         * queue.  It is worth noting that this move
2699                         * is not so dangerous for the other queues,
2700                         * because soft real-time queues are not
2701                         * greedy.
2702                         *
2703                         * To not add a further variable, we use the
2704                         * overloaded field budget_timeout to
2705                         * determine for how long the queue has not
2706                         * received service, i.e., how much time has
2707                         * elapsed since the queue expired. However,
2708                         * this is a little imprecise, because
2709                         * budget_timeout is set to jiffies if bfqq
2710                         * not only expires, but also remains with no
2711                         * request.
2712                         */
2713                        if (time_after(bfqq->budget_timeout,
2714                                       bfqq->last_wr_start_finish))
2715                                bfqq->last_wr_start_finish +=
2716                                        jiffies - bfqq->budget_timeout;
2717                        else
2718                                bfqq->last_wr_start_finish = jiffies;
2719                }
2720
2721                bfq_set_budget_timeout(bfqd, bfqq);
2722                bfq_log_bfqq(bfqd, bfqq,
2723                             "set_in_service_queue, cur-budget = %d",
2724                             bfqq->entity.budget);
2725        }
2726
2727        bfqd->in_service_queue = bfqq;
2728}
2729
2730/*
2731 * Get and set a new queue for service.
2732 */
2733static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2734{
2735        struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2736
2737        __bfq_set_in_service_queue(bfqd, bfqq);
2738        return bfqq;
2739}
2740
2741static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2742{
2743        struct bfq_queue *bfqq = bfqd->in_service_queue;
2744        u32 sl;
2745
2746        bfq_mark_bfqq_wait_request(bfqq);
2747
2748        /*
2749         * We don't want to idle for seeks, but we do want to allow
2750         * fair distribution of slice time for a process doing back-to-back
2751         * seeks. So allow a little bit of time for him to submit a new rq.
2752         */
2753        sl = bfqd->bfq_slice_idle;
2754        /*
2755         * Unless the queue is being weight-raised or the scenario is
2756         * asymmetric, grant only minimum idle time if the queue
2757         * is seeky. A long idling is preserved for a weight-raised
2758         * queue, or, more in general, in an asymmetric scenario,
2759         * because a long idling is needed for guaranteeing to a queue
2760         * its reserved share of the throughput (in particular, it is
2761         * needed if the queue has a higher weight than some other
2762         * queue).
2763         */
2764        if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2765            !bfq_asymmetric_scenario(bfqd, bfqq))
2766                sl = min_t(u64, sl, BFQ_MIN_TT);
2767        else if (bfqq->wr_coeff > 1)
2768                sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
2769
2770        bfqd->last_idling_start = ktime_get();
2771        bfqd->last_idling_start_jiffies = jiffies;
2772
2773        hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2774                      HRTIMER_MODE_REL);
2775        bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
2776}
2777
2778/*
2779 * In autotuning mode, max_budget is dynamically recomputed as the
2780 * amount of sectors transferred in timeout at the estimated peak
2781 * rate. This enables BFQ to utilize a full timeslice with a full
2782 * budget, even if the in-service queue is served at peak rate. And
2783 * this maximises throughput with sequential workloads.
2784 */
2785static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2786{
2787        return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2788                jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2789}
2790
2791/*
2792 * Update parameters related to throughput and responsiveness, as a
2793 * function of the estimated peak rate. See comments on
2794 * bfq_calc_max_budget(), and on the ref_wr_duration array.
2795 */
2796static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2797{
2798        if (bfqd->bfq_user_max_budget == 0) {
2799                bfqd->bfq_max_budget =
2800                        bfq_calc_max_budget(bfqd);
2801                bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
2802        }
2803}
2804
2805static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2806                                       struct request *rq)
2807{
2808        if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2809                bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2810                bfqd->peak_rate_samples = 1;
2811                bfqd->sequential_samples = 0;
2812                bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2813                        blk_rq_sectors(rq);
2814        } else /* no new rq dispatched, just reset the number of samples */
2815                bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2816
2817        bfq_log(bfqd,
2818                "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2819                bfqd->peak_rate_samples, bfqd->sequential_samples,
2820                bfqd->tot_sectors_dispatched);
2821}
2822
2823static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2824{
2825        u32 rate, weight, divisor;
2826
2827        /*
2828         * For the convergence property to hold (see comments on
2829         * bfq_update_peak_rate()) and for the assessment to be
2830         * reliable, a minimum number of samples must be present, and
2831         * a minimum amount of time must have elapsed. If not so, do
2832         * not compute new rate. Just reset parameters, to get ready
2833         * for a new evaluation attempt.
2834         */
2835        if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2836            bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2837                goto reset_computation;
2838
2839        /*
2840         * If a new request completion has occurred after last
2841         * dispatch, then, to approximate the rate at which requests
2842         * have been served by the device, it is more precise to
2843         * extend the observation interval to the last completion.
2844         */
2845        bfqd->delta_from_first =
2846                max_t(u64, bfqd->delta_from_first,
2847                      bfqd->last_completion - bfqd->first_dispatch);
2848
2849        /*
2850         * Rate computed in sects/usec, and not sects/nsec, for
2851         * precision issues.
2852         */
2853        rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2854                        div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2855
2856        /*
2857         * Peak rate not updated if:
2858         * - the percentage of sequential dispatches is below 3/4 of the
2859         *   total, and rate is below the current estimated peak rate
2860         * - rate is unreasonably high (> 20M sectors/sec)
2861         */
2862        if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2863             rate <= bfqd->peak_rate) ||
2864                rate > 20<<BFQ_RATE_SHIFT)
2865                goto reset_computation;
2866
2867        /*
2868         * We have to update the peak rate, at last! To this purpose,
2869         * we use a low-pass filter. We compute the smoothing constant
2870         * of the filter as a function of the 'weight' of the new
2871         * measured rate.
2872         *
2873         * As can be seen in next formulas, we define this weight as a
2874         * quantity proportional to how sequential the workload is,
2875         * and to how long the observation time interval is.
2876         *
2877         * The weight runs from 0 to 8. The maximum value of the
2878         * weight, 8, yields the minimum value for the smoothing
2879         * constant. At this minimum value for the smoothing constant,
2880         * the measured rate contributes for half of the next value of
2881         * the estimated peak rate.
2882         *
2883         * So, the first step is to compute the weight as a function
2884         * of how sequential the workload is. Note that the weight
2885         * cannot reach 9, because bfqd->sequential_samples cannot
2886         * become equal to bfqd->peak_rate_samples, which, in its
2887         * turn, holds true because bfqd->sequential_samples is not
2888         * incremented for the first sample.
2889         */
2890        weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2891
2892        /*
2893         * Second step: further refine the weight as a function of the
2894         * duration of the observation interval.
2895         */
2896        weight = min_t(u32, 8,
2897                       div_u64(weight * bfqd->delta_from_first,
2898                               BFQ_RATE_REF_INTERVAL));
2899
2900        /*
2901         * Divisor ranging from 10, for minimum weight, to 2, for
2902         * maximum weight.
2903         */
2904        divisor = 10 - weight;
2905
2906        /*
2907         * Finally, update peak rate:
2908         *
2909         * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
2910         */
2911        bfqd->peak_rate *= divisor-1;
2912        bfqd->peak_rate /= divisor;
2913        rate /= divisor; /* smoothing constant alpha = 1/divisor */
2914
2915        bfqd->peak_rate += rate;
2916
2917        /*
2918         * For a very slow device, bfqd->peak_rate can reach 0 (see
2919         * the minimum representable values reported in the comments
2920         * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
2921         * divisions by zero where bfqd->peak_rate is used as a
2922         * divisor.
2923         */
2924        bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
2925
2926        update_thr_responsiveness_params(bfqd);
2927
2928reset_computation:
2929        bfq_reset_rate_computation(bfqd, rq);
2930}
2931
2932/*
2933 * Update the read/write peak rate (the main quantity used for
2934 * auto-tuning, see update_thr_responsiveness_params()).
2935 *
2936 * It is not trivial to estimate the peak rate (correctly): because of
2937 * the presence of sw and hw queues between the scheduler and the
2938 * device components that finally serve I/O requests, it is hard to
2939 * say exactly when a given dispatched request is served inside the
2940 * device, and for how long. As a consequence, it is hard to know
2941 * precisely at what rate a given set of requests is actually served
2942 * by the device.
2943 *
2944 * On the opposite end, the dispatch time of any request is trivially
2945 * available, and, from this piece of information, the "dispatch rate"
2946 * of requests can be immediately computed. So, the idea in the next
2947 * function is to use what is known, namely request dispatch times
2948 * (plus, when useful, request completion times), to estimate what is
2949 * unknown, namely in-device request service rate.
2950 *
2951 * The main issue is that, because of the above facts, the rate at
2952 * which a certain set of requests is dispatched over a certain time
2953 * interval can vary greatly with respect to the rate at which the
2954 * same requests are then served. But, since the size of any
2955 * intermediate queue is limited, and the service scheme is lossless
2956 * (no request is silently dropped), the following obvious convergence
2957 * property holds: the number of requests dispatched MUST become
2958 * closer and closer to the number of requests completed as the
2959 * observation interval grows. This is the key property used in
2960 * the next function to estimate the peak service rate as a function
2961 * of the observed dispatch rate. The function assumes to be invoked
2962 * on every request dispatch.
2963 */
2964static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2965{
2966        u64 now_ns = ktime_get_ns();
2967
2968        if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2969                bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2970                        bfqd->peak_rate_samples);
2971                bfq_reset_rate_computation(bfqd, rq);
2972                goto update_last_values; /* will add one sample */
2973        }
2974
2975        /*
2976         * Device idle for very long: the observation interval lasting
2977         * up to this dispatch cannot be a valid observation interval
2978         * for computing a new peak rate (similarly to the late-
2979         * completion event in bfq_completed_request()). Go to
2980         * update_rate_and_reset to have the following three steps
2981         * taken:
2982         * - close the observation interval at the last (previous)
2983         *   request dispatch or completion
2984         * - compute rate, if possible, for that observation interval
2985         * - start a new observation interval with this dispatch
2986         */
2987        if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2988            bfqd->rq_in_driver == 0)
2989                goto update_rate_and_reset;
2990
2991        /* Update sampling information */
2992        bfqd->peak_rate_samples++;
2993
2994        if ((bfqd->rq_in_driver > 0 ||
2995                now_ns - bfqd->last_completion < BFQ_MIN_TT)
2996            && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
2997                bfqd->sequential_samples++;
2998
2999        bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
3000
3001        /* Reset max observed rq size every 32 dispatches */
3002        if (likely(bfqd->peak_rate_samples % 32))
3003                bfqd->last_rq_max_size =
3004                        max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
3005        else
3006                bfqd->last_rq_max_size = blk_rq_sectors(rq);
3007
3008        bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
3009
3010        /* Target observation interval not yet reached, go on sampling */
3011        if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
3012                goto update_last_values;
3013
3014update_rate_and_reset:
3015        bfq_update_rate_reset(bfqd, rq);
3016update_last_values:
3017        bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
3018        if (RQ_BFQQ(rq) == bfqd->in_service_queue)
3019                bfqd->in_serv_last_pos = bfqd->last_position;
3020        bfqd->last_dispatch = now_ns;
3021}
3022
3023/*
3024 * Remove request from internal lists.
3025 */
3026static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
3027{
3028        struct bfq_queue *bfqq = RQ_BFQQ(rq);
3029
3030        /*
3031         * For consistency, the next instruction should have been
3032         * executed after removing the request from the queue and
3033         * dispatching it.  We execute instead this instruction before
3034         * bfq_remove_request() (and hence introduce a temporary
3035         * inconsistency), for efficiency.  In fact, should this
3036         * dispatch occur for a non in-service bfqq, this anticipated
3037         * increment prevents two counters related to bfqq->dispatched
3038         * from risking to be, first, uselessly decremented, and then
3039         * incremented again when the (new) value of bfqq->dispatched
3040         * happens to be taken into account.
3041         */
3042        bfqq->dispatched++;
3043        bfq_update_peak_rate(q->elevator->elevator_data, rq);
3044
3045        bfq_remove_request(q, rq);
3046}
3047
3048static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3049{
3050        /*
3051         * If this bfqq is shared between multiple processes, check
3052         * to make sure that those processes are still issuing I/Os
3053         * within the mean seek distance. If not, it may be time to
3054         * break the queues apart again.
3055         */
3056        if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
3057                bfq_mark_bfqq_split_coop(bfqq);
3058
3059        if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
3060                if (bfqq->dispatched == 0)
3061                        /*
3062                         * Overloading budget_timeout field to store
3063                         * the time at which the queue remains with no
3064                         * backlog and no outstanding request; used by
3065                         * the weight-raising mechanism.
3066                         */
3067                        bfqq->budget_timeout = jiffies;
3068
3069                bfq_del_bfqq_busy(bfqd, bfqq, true);
3070        } else {
3071                bfq_requeue_bfqq(bfqd, bfqq, true);
3072                /*
3073                 * Resort priority tree of potential close cooperators.
3074                 * See comments on bfq_pos_tree_add_move() for the unlikely().
3075                 */
3076                if (unlikely(!bfqd->nonrot_with_queueing))
3077                        bfq_pos_tree_add_move(bfqd, bfqq);
3078        }
3079
3080        /*
3081         * All in-service entities must have been properly deactivated
3082         * or requeued before executing the next function, which
3083         * resets all in-service entities as no more in service. This
3084         * may cause bfqq to be freed. If this happens, the next
3085         * function returns true.
3086         */
3087        return __bfq_bfqd_reset_in_service(bfqd);
3088}
3089
3090/**
3091 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
3092 * @bfqd: device data.
3093 * @bfqq: queue to update.
3094 * @reason: reason for expiration.
3095 *
3096 * Handle the feedback on @bfqq budget at queue expiration.
3097 * See the body for detailed comments.
3098 */
3099static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
3100                                     struct bfq_queue *bfqq,
3101                                     enum bfqq_expiration reason)
3102{
3103        struct request *next_rq;
3104        int budget, min_budget;
3105
3106        min_budget = bfq_min_budget(bfqd);
3107
3108        if (bfqq->wr_coeff == 1)
3109                budget = bfqq->max_budget;
3110        else /*
3111              * Use a constant, low budget for weight-raised queues,
3112              * to help achieve a low latency. Keep it slightly higher
3113              * than the minimum possible budget, to cause a little
3114              * bit fewer expirations.
3115              */
3116                budget = 2 * min_budget;
3117
3118        bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
3119                bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
3120        bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
3121                budget, bfq_min_budget(bfqd));
3122        bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
3123                bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
3124
3125        if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
3126                switch (reason) {
3127                /*
3128                 * Caveat: in all the following cases we trade latency
3129                 * for throughput.
3130                 */
3131                case BFQQE_TOO_IDLE:
3132                        /*
3133                         * This is the only case where we may reduce
3134                         * the budget: if there is no request of the
3135                         * process still waiting for completion, then
3136                         * we assume (tentatively) that the timer has
3137                         * expired because the batch of requests of
3138                         * the process could have been served with a
3139                         * smaller budget.  Hence, betting that
3140                         * process will behave in the same way when it
3141                         * becomes backlogged again, we reduce its
3142                         * next budget.  As long as we guess right,
3143                         * this budget cut reduces the latency
3144                         * experienced by the process.
3145                         *
3146                         * However, if there are still outstanding
3147                         * requests, then the process may have not yet
3148                         * issued its next request just because it is
3149                         * still waiting for the completion of some of
3150                         * the still outstanding ones.  So in this
3151                         * subcase we do not reduce its budget, on the
3152                         * contrary we increase it to possibly boost
3153                         * the throughput, as discussed in the
3154                         * comments to the BUDGET_TIMEOUT case.
3155                         */
3156                        if (bfqq->dispatched > 0) /* still outstanding reqs */
3157                                budget = min(budget * 2, bfqd->bfq_max_budget);
3158                        else {
3159                                if (budget > 5 * min_budget)
3160                                        budget -= 4 * min_budget;
3161                                else
3162                                        budget = min_budget;
3163                        }
3164                        break;
3165                case BFQQE_BUDGET_TIMEOUT:
3166                        /*
3167                         * We double the budget here because it gives
3168                         * the chance to boost the throughput if this
3169                         * is not a seeky process (and has bumped into
3170                         * this timeout because of, e.g., ZBR).
3171                         */
3172                        budget = min(budget * 2, bfqd->bfq_max_budget);
3173                        break;
3174                case BFQQE_BUDGET_EXHAUSTED:
3175                        /*
3176                         * The process still has backlog, and did not
3177                         * let either the budget timeout or the disk
3178                         * idling timeout expire. Hence it is not
3179                         * seeky, has a short thinktime and may be
3180                         * happy with a higher budget too. So
3181                         * definitely increase the budget of this good
3182                         * candidate to boost the disk throughput.
3183                         */
3184                        budget = min(budget * 4, bfqd->bfq_max_budget);
3185                        break;
3186                case BFQQE_NO_MORE_REQUESTS:
3187                        /*
3188                         * For queues that expire for this reason, it
3189                         * is particularly important to keep the
3190                         * budget close to the actual service they
3191                         * need. Doing so reduces the timestamp
3192                         * misalignment problem described in the
3193                         * comments in the body of
3194                         * __bfq_activate_entity. In fact, suppose
3195                         * that a queue systematically expires for
3196                         * BFQQE_NO_MORE_REQUESTS and presents a
3197                         * new request in time to enjoy timestamp
3198                         * back-shifting. The larger the budget of the
3199                         * queue is with respect to the service the
3200                         * queue actually requests in each service
3201                         * slot, the more times the queue can be
3202                         * reactivated with the same virtual finish
3203                         * time. It follows that, even if this finish
3204                         * time is pushed to the system virtual time
3205                         * to reduce the consequent timestamp
3206                         * misalignment, the queue unjustly enjoys for
3207                         * many re-activations a lower finish time
3208                         * than all newly activated queues.
3209                         *
3210                         * The service needed by bfqq is measured
3211                         * quite precisely by bfqq->entity.service.
3212                         * Since bfqq does not enjoy device idling,
3213                         * bfqq->entity.service is equal to the number
3214                         * of sectors that the process associated with
3215                         * bfqq requested to read/write before waiting
3216                         * for request completions, or blocking for
3217                         * other reasons.
3218                         */
3219                        budget = max_t(int, bfqq->entity.service, min_budget);
3220                        break;
3221                default:
3222                        return;
3223                }
3224        } else if (!bfq_bfqq_sync(bfqq)) {
3225                /*
3226                 * Async queues get always the maximum possible
3227                 * budget, as for them we do not care about latency
3228                 * (in addition, their ability to dispatch is limited
3229                 * by the charging factor).
3230                 */
3231                budget = bfqd->bfq_max_budget;
3232        }
3233
3234        bfqq->max_budget = budget;
3235
3236        if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
3237            !bfqd->bfq_user_max_budget)
3238                bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
3239
3240        /*
3241         * If there is still backlog, then assign a new budget, making
3242         * sure that it is large enough for the next request.  Since
3243         * the finish time of bfqq must be kept in sync with the
3244         * budget, be sure to call __bfq_bfqq_expire() *after* this
3245         * update.
3246         *
3247         * If there is no backlog, then no need to update the budget;
3248         * it will be updated on the arrival of a new request.
3249         */
3250        next_rq = bfqq->next_rq;
3251        if (next_rq)
3252                bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
3253                                            bfq_serv_to_charge(next_rq, bfqq));
3254
3255        bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
3256                        next_rq ? blk_rq_sectors(next_rq) : 0,
3257                        bfqq->entity.budget);
3258}
3259
3260/*
3261 * Return true if the process associated with bfqq is "slow". The slow
3262 * flag is used, in addition to the budget timeout, to reduce the
3263 * amount of service provided to seeky processes, and thus reduce
3264 * their chances to lower the throughput. More details in the comments
3265 * on the function bfq_bfqq_expire().
3266 *
3267 * An important observation is in order: as discussed in the comments
3268 * on the function bfq_update_peak_rate(), with devices with internal
3269 * queues, it is hard if ever possible to know when and for how long
3270 * an I/O request is processed by the device (apart from the trivial
3271 * I/O pattern where a new request is dispatched only after the
3272 * previous one has been completed). This makes it hard to evaluate
3273 * the real rate at which the I/O requests of each bfq_queue are
3274 * served.  In fact, for an I/O scheduler like BFQ, serving a
3275 * bfq_queue means just dispatching its requests during its service
3276 * slot (i.e., until the budget of the queue is exhausted, or the
3277 * queue remains idle, or, finally, a timeout fires). But, during the
3278 * service slot of a bfq_queue, around 100 ms at most, the device may
3279 * be even still processing requests of bfq_queues served in previous
3280 * service slots. On the opposite end, the requests of the in-service
3281 * bfq_queue may be completed after the service slot of the queue
3282 * finishes.
3283 *
3284 * Anyway, unless more sophisticated solutions are used
3285 * (where possible), the sum of the sizes of the requests dispatched
3286 * during the service slot of a bfq_queue is probably the only
3287 * approximation available for the service received by the bfq_queue
3288 * during its service slot. And this sum is the quantity used in this
3289 * function to evaluate the I/O speed of a process.
3290 */
3291static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3292                                 bool compensate, enum bfqq_expiration reason,
3293                                 unsigned long *delta_ms)
3294{
3295        ktime_t delta_ktime;
3296        u32 delta_usecs;
3297        bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
3298
3299        if (!bfq_bfqq_sync(bfqq))
3300                return false;
3301
3302        if (compensate)
3303                delta_ktime = bfqd->last_idling_start;
3304        else
3305                delta_ktime = ktime_get();
3306        delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
3307        delta_usecs = ktime_to_us(delta_ktime);
3308
3309        /* don't use too short time intervals */
3310        if (delta_usecs < 1000) {
3311                if (blk_queue_nonrot(bfqd->queue))
3312                         /*
3313                          * give same worst-case guarantees as idling
3314                          * for seeky
3315                          */
3316                        *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
3317                else /* charge at least one seek */
3318                        *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
3319
3320                return slow;
3321        }
3322
3323        *delta_ms = delta_usecs / USEC_PER_MSEC;
3324
3325        /*
3326         * Use only long (> 20ms) intervals to filter out excessive
3327         * spikes in service rate estimation.
3328         */
3329        if (delta_usecs > 20000) {
3330                /*
3331                 * Caveat for rotational devices: processes doing I/O
3332                 * in the slower disk zones tend to be slow(er) even
3333                 * if not seeky. In this respect, the estimated peak
3334                 * rate is likely to be an average over the disk
3335                 * surface. Accordingly, to not be too harsh with
3336                 * unlucky processes, a process is deemed slow only if
3337                 * its rate has been lower than half of the estimated
3338                 * peak rate.
3339                 */
3340                slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
3341        }
3342
3343        bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
3344
3345        return slow;
3346}
3347
3348/*
3349 * To be deemed as soft real-time, an application must meet two
3350 * requirements. First, the application must not require an average
3351 * bandwidth higher than the approximate bandwidth required to playback or
3352 * record a compressed high-definition video.
3353 * The next function is invoked on the completion of the last request of a
3354 * batch, to compute the next-start time instant, soft_rt_next_start, such
3355 * that, if the next request of the application does not arrive before
3356 * soft_rt_next_start, then the above requirement on the bandwidth is met.
3357 *
3358 * The second requirement is that the request pattern of the application is
3359 * isochronous, i.e., that, after issuing a request or a batch of requests,
3360 * the application stops issuing new requests until all its pending requests
3361 * have been completed. After that, the application may issue a new batch,
3362 * and so on.
3363 * For this reason the next function is invoked to compute
3364 * soft_rt_next_start only for applications that meet this requirement,
3365 * whereas soft_rt_next_start is set to infinity for applications that do
3366 * not.
3367 *
3368 * Unfortunately, even a greedy (i.e., I/O-bound) application may
3369 * happen to meet, occasionally or systematically, both the above
3370 * bandwidth and isochrony requirements. This may happen at least in
3371 * the following circumstances. First, if the CPU load is high. The
3372 * application may stop issuing requests while the CPUs are busy
3373 * serving other processes, then restart, then stop again for a while,
3374 * and so on. The other circumstances are related to the storage
3375 * device: the storage device is highly loaded or reaches a low-enough
3376 * throughput with the I/O of the application (e.g., because the I/O
3377 * is random and/or the device is slow). In all these cases, the
3378 * I/O of the application may be simply slowed down enough to meet
3379 * the bandwidth and isochrony requirements. To reduce the probability
3380 * that greedy applications are deemed as soft real-time in these
3381 * corner cases, a further rule is used in the computation of
3382 * soft_rt_next_start: the return value of this function is forced to
3383 * be higher than the maximum between the following two quantities.
3384 *
3385 * (a) Current time plus: (1) the maximum time for which the arrival
3386 *     of a request is waited for when a sync queue becomes idle,
3387 *     namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
3388 *     postpone for a moment the reason for adding a few extra
3389 *     jiffies; we get back to it after next item (b).  Lower-bounding
3390 *     the return value of this function with the current time plus
3391 *     bfqd->bfq_slice_idle tends to filter out greedy applications,
3392 *     because the latter issue their next request as soon as possible
3393 *     after the last one has been completed. In contrast, a soft
3394 *     real-time application spends some time processing data, after a
3395 *     batch of its requests has been completed.
3396 *
3397 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
3398 *     above, greedy applications may happen to meet both the
3399 *     bandwidth and isochrony requirements under heavy CPU or
3400 *     storage-device load. In more detail, in these scenarios, these
3401 *     applications happen, only for limited time periods, to do I/O
3402 *     slowly enough to meet all the requirements described so far,
3403 *     including the filtering in above item (a). These slow-speed
3404 *     time intervals are usually interspersed between other time
3405 *     intervals during which these applications do I/O at a very high
3406 *     speed. Fortunately, exactly because of the high speed of the
3407 *     I/O in the high-speed intervals, the values returned by this
3408 *     function happen to be so high, near the end of any such
3409 *     high-speed interval, to be likely to fall *after* the end of
3410 *     the low-speed time interval that follows. These high values are
3411 *     stored in bfqq->soft_rt_next_start after each invocation of
3412 *     this function. As a consequence, if the last value of
3413 *     bfqq->soft_rt_next_start is constantly used to lower-bound the
3414 *     next value that this function may return, then, from the very
3415 *     beginning of a low-speed interval, bfqq->soft_rt_next_start is
3416 *     likely to be constantly kept so high that any I/O request
3417 *     issued during the low-speed interval is considered as arriving
3418 *     to soon for the application to be deemed as soft
3419 *     real-time. Then, in the high-speed interval that follows, the
3420 *     application will not be deemed as soft real-time, just because
3421 *     it will do I/O at a high speed. And so on.
3422 *
3423 * Getting back to the filtering in item (a), in the following two
3424 * cases this filtering might be easily passed by a greedy
3425 * application, if the reference quantity was just
3426 * bfqd->bfq_slice_idle:
3427 * 1) HZ is so low that the duration of a jiffy is comparable to or
3428 *    higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
3429 *    devices with HZ=100. The time granularity may be so coarse
3430 *    that the approximation, in jiffies, of bfqd->bfq_slice_idle
3431 *    is rather lower than the exact value.
3432 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
3433 *    for a while, then suddenly 'jump' by several units to recover the lost
3434 *    increments. This seems to happen, e.g., inside virtual machines.
3435 * To address this issue, in the filtering in (a) we do not use as a
3436 * reference time interval just bfqd->bfq_slice_idle, but
3437 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
3438 * minimum number of jiffies for which the filter seems to be quite
3439 * precise also in embedded systems and KVM/QEMU virtual machines.
3440 */
3441static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
3442                                                struct bfq_queue *bfqq)
3443{
3444        return max3(bfqq->soft_rt_next_start,
3445                    bfqq->last_idle_bklogged +
3446                    HZ * bfqq->service_from_backlogged /
3447                    bfqd->bfq_wr_max_softrt_rate,
3448                    jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
3449}
3450
3451/**
3452 * bfq_bfqq_expire - expire a queue.
3453 * @bfqd: device owning the queue.
3454 * @bfqq: the queue to expire.
3455 * @compensate: if true, compensate for the time spent idling.
3456 * @reason: the reason causing the expiration.
3457 *
3458 * If the process associated with bfqq does slow I/O (e.g., because it
3459 * issues random requests), we charge bfqq with the time it has been
3460 * in service instead of the service it has received (see
3461 * bfq_bfqq_charge_time for details on how this goal is achieved). As
3462 * a consequence, bfqq will typically get higher timestamps upon
3463 * reactivation, and hence it will be rescheduled as if it had
3464 * received more service than what it has actually received. In the
3465 * end, bfqq receives less service in proportion to how slowly its
3466 * associated process consumes its budgets (and hence how seriously it
3467 * tends to lower the throughput). In addition, this time-charging
3468 * strategy guarantees time fairness among slow processes. In
3469 * contrast, if the process associated with bfqq is not slow, we
3470 * charge bfqq exactly with the service it has received.
3471 *
3472 * Charging time to the first type of queues and the exact service to
3473 * the other has the effect of using the WF2Q+ policy to schedule the
3474 * former on a timeslice basis, without violating service domain
3475 * guarantees among the latter.
3476 */
3477void bfq_bfqq_expire(struct bfq_data *bfqd,
3478                     struct bfq_queue *bfqq,
3479                     bool compensate,
3480                     enum bfqq_expiration reason)
3481{
3482        bool slow;
3483        unsigned long delta = 0;
3484        struct bfq_entity *entity = &bfqq->entity;
3485
3486        /*
3487         * Check whether the process is slow (see bfq_bfqq_is_slow).
3488         */
3489        slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
3490
3491        /*
3492         * As above explained, charge slow (typically seeky) and
3493         * timed-out queues with the time and not the service
3494         * received, to favor sequential workloads.
3495         *
3496         * Processes doing I/O in the slower disk zones will tend to
3497         * be slow(er) even if not seeky. Therefore, since the
3498         * estimated peak rate is actually an average over the disk
3499         * surface, these processes may timeout just for bad luck. To
3500         * avoid punishing them, do not charge time to processes that
3501         * succeeded in consuming at least 2/3 of their budget. This
3502         * allows BFQ to preserve enough elasticity to still perform
3503         * bandwidth, and not time, distribution with little unlucky
3504         * or quasi-sequential processes.
3505         */
3506        if (bfqq->wr_coeff == 1 &&
3507            (slow ||
3508             (reason == BFQQE_BUDGET_TIMEOUT &&
3509              bfq_bfqq_budget_left(bfqq) >=  entity->budget / 3)))
3510                bfq_bfqq_charge_time(bfqd, bfqq, delta);
3511
3512        if (reason == BFQQE_TOO_IDLE &&
3513            entity->service <= 2 * entity->budget / 10)
3514                bfq_clear_bfqq_IO_bound(bfqq);
3515
3516        if (bfqd->low_latency && bfqq->wr_coeff == 1)
3517                bfqq->last_wr_start_finish = jiffies;
3518
3519        if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
3520            RB_EMPTY_ROOT(&bfqq->sort_list)) {
3521                /*
3522                 * If we get here, and there are no outstanding
3523                 * requests, then the request pattern is isochronous
3524                 * (see the comments on the function
3525                 * bfq_bfqq_softrt_next_start()). Thus we can compute
3526                 * soft_rt_next_start. And we do it, unless bfqq is in
3527                 * interactive weight raising. We do not do it in the
3528                 * latter subcase, for the following reason. bfqq may
3529                 * be conveying the I/O needed to load a soft
3530                 * real-time application. Such an application will
3531                 * actually exhibit a soft real-time I/O pattern after
3532                 * it finally starts doing its job. But, if
3533                 * soft_rt_next_start is computed here for an
3534                 * interactive bfqq, and bfqq had received a lot of
3535                 * service before remaining with no outstanding
3536                 * request (likely to happen on a fast device), then
3537                 * soft_rt_next_start would be assigned such a high
3538                 * value that, for a very long time, bfqq would be
3539                 * prevented from being possibly considered as soft
3540                 * real time.
3541                 *
3542                 * If, instead, the queue still has outstanding
3543                 * requests, then we have to wait for the completion
3544                 * of all the outstanding requests to discover whether
3545                 * the request pattern is actually isochronous.
3546                 */
3547                if (bfqq->dispatched == 0 &&
3548                    bfqq->wr_coeff != bfqd->bfq_wr_coeff)
3549                        bfqq->soft_rt_next_start =
3550                                bfq_bfqq_softrt_next_start(bfqd, bfqq);
3551                else if (bfqq->dispatched > 0) {
3552                        /*
3553                         * Schedule an update of soft_rt_next_start to when
3554                         * the task may be discovered to be isochronous.
3555                         */
3556                        bfq_mark_bfqq_softrt_update(bfqq);
3557                }
3558        }
3559
3560        bfq_log_bfqq(bfqd, bfqq,
3561                "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3562                slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
3563
3564        /*
3565         * bfqq expired, so no total service time needs to be computed
3566         * any longer: reset state machine for measuring total service
3567         * times.
3568         */
3569        bfqd->rqs_injected = bfqd->wait_dispatch = false;
3570        bfqd->waited_rq = NULL;
3571
3572        /*
3573         * Increase, decrease or leave budget unchanged according to
3574         * reason.
3575         */
3576        __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3577        if (__bfq_bfqq_expire(bfqd, bfqq))
3578                /* bfqq is gone, no more actions on it */
3579                return;
3580
3581        /* mark bfqq as waiting a request only if a bic still points to it */
3582        if (!bfq_bfqq_busy(bfqq) &&
3583            reason != BFQQE_BUDGET_TIMEOUT &&
3584            reason != BFQQE_BUDGET_EXHAUSTED) {
3585                bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3586                /*
3587                 * Not setting service to 0, because, if the next rq
3588                 * arrives in time, the queue will go on receiving
3589                 * service with this same budget (as if it never expired)
3590                 */
3591        } else
3592                entity->service = 0;
3593
3594        /*
3595         * Reset the received-service counter for every parent entity.
3596         * Differently from what happens with bfqq->entity.service,
3597         * the resetting of this counter never needs to be postponed
3598         * for parent entities. In fact, in case bfqq may have a
3599         * chance to go on being served using the last, partially
3600         * consumed budget, bfqq->entity.service needs to be kept,
3601         * because if bfqq then actually goes on being served using
3602         * the same budget, the last value of bfqq->entity.service is
3603         * needed to properly decrement bfqq->entity.budget by the
3604         * portion already consumed. In contrast, it is not necessary
3605         * to keep entity->service for parent entities too, because
3606         * the bubble up of the new value of bfqq->entity.budget will
3607         * make sure that the budgets of parent entities are correct,
3608         * even in case bfqq and thus parent entities go on receiving
3609         * service with the same budget.
3610         */
3611        entity = entity->parent;
3612        for_each_entity(entity)
3613                entity->service = 0;
3614}
3615
3616/*
3617 * Budget timeout is not implemented through a dedicated timer, but
3618 * just checked on request arrivals and completions, as well as on
3619 * idle timer expirations.
3620 */
3621static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3622{
3623        return time_is_before_eq_jiffies(bfqq->budget_timeout);
3624}
3625
3626/*
3627 * If we expire a queue that is actively waiting (i.e., with the
3628 * device idled) for the arrival of a new request, then we may incur
3629 * the timestamp misalignment problem described in the body of the
3630 * function __bfq_activate_entity. Hence we return true only if this
3631 * condition does not hold, or if the queue is slow enough to deserve
3632 * only to be kicked off for preserving a high throughput.
3633 */
3634static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3635{
3636        bfq_log_bfqq(bfqq->bfqd, bfqq,
3637                "may_budget_timeout: wait_request %d left %d timeout %d",
3638                bfq_bfqq_wait_request(bfqq),
3639                        bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
3640                bfq_bfqq_budget_timeout(bfqq));
3641
3642        return (!bfq_bfqq_wait_request(bfqq) ||
3643                bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
3644                &&
3645                bfq_bfqq_budget_timeout(bfqq);
3646}
3647
3648static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
3649                                             struct bfq_queue *bfqq)
3650{
3651        bool rot_without_queueing =
3652                !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
3653                bfqq_sequential_and_IO_bound,
3654                idling_boosts_thr;
3655
3656        bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
3657                bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
3658
3659        /*
3660         * The next variable takes into account the cases where idling
3661         * boosts the throughput.
3662         *
3663         * The value of the variable is computed considering, first, that
3664         * idling is virtually always beneficial for the throughput if:
3665         * (a) the device is not NCQ-capable and rotational, or
3666         * (b) regardless of the presence of NCQ, the device is rotational and
3667         *     the request pattern for bfqq is I/O-bound and sequential, or
3668         * (c) regardless of whether it is rotational, the device is
3669         *     not NCQ-capable and the request pattern for bfqq is
3670         *     I/O-bound and sequential.
3671         *
3672         * Secondly, and in contrast to the above item (b), idling an
3673         * NCQ-capable flash-based device would not boost the
3674         * throughput even with sequential I/O; rather it would lower
3675         * the throughput in proportion to how fast the device
3676         * is. Accordingly, the next variable is true if any of the
3677         * above conditions (a), (b) or (c) is true, and, in
3678         * particular, happens to be false if bfqd is an NCQ-capable
3679         * flash-based device.
3680         */
3681        idling_boosts_thr = rot_without_queueing ||
3682                ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
3683                 bfqq_sequential_and_IO_bound);
3684
3685        /*
3686         * The return value of this function is equal to that of
3687         * idling_boosts_thr, unless a special case holds. In this
3688         * special case, described below, idling may cause problems to
3689         * weight-raised queues.
3690         *
3691         * When the request pool is saturated (e.g., in the presence
3692         * of write hogs), if the processes associated with
3693         * non-weight-raised queues ask for requests at a lower rate,
3694         * then processes associated with weight-raised queues have a
3695         * higher probability to get a request from the pool
3696         * immediately (or at least soon) when they need one. Thus
3697         * they have a higher probability to actually get a fraction
3698         * of the device throughput proportional to their high
3699         * weight. This is especially true with NCQ-capable drives,
3700         * which enqueue several requests in advance, and further
3701         * reorder internally-queued requests.
3702         *
3703         * For this reason, we force to false the return value if
3704         * there are weight-raised busy queues. In this case, and if
3705         * bfqq is not weight-raised, this guarantees that the device
3706         * is not idled for bfqq (if, instead, bfqq is weight-raised,
3707         * then idling will be guaranteed by another variable, see
3708         * below). Combined with the timestamping rules of BFQ (see
3709         * [1] for details), this behavior causes bfqq, and hence any
3710         * sync non-weight-raised queue, to get a lower number of
3711         * requests served, and thus to ask for a lower number of
3712         * requests from the request pool, before the busy
3713         * weight-raised queues get served again. This often mitigates
3714         * starvation problems in the presence of heavy write
3715         * workloads and NCQ, thereby guaranteeing a higher
3716         * application and system responsiveness in these hostile
3717         * scenarios.
3718         */
3719        return idling_boosts_thr &&
3720                bfqd->wr_busy_queues == 0;
3721}
3722
3723/*
3724 * There is a case where idling does not have to be performed for
3725 * throughput concerns, but to preserve the throughput share of
3726 * the process associated with bfqq.
3727 *
3728 * To introduce this case, we can note that allowing the drive
3729 * to enqueue more than one request at a time, and hence
3730 * delegating de facto final scheduling decisions to the
3731 * drive's internal scheduler, entails loss of control on the
3732 * actual request service order. In particular, the critical
3733 * situation is when requests from different processes happen
3734 * to be present, at the same time, in the internal queue(s)
3735 * of the drive. In such a situation, the drive, by deciding
3736 * the service order of the internally-queued requests, does
3737 * determine also the actual throughput distribution among
3738 * these processes. But the drive typically has no notion or
3739 * concern about per-process throughput distribution, and
3740 * makes its decisions only on a per-request basis. Therefore,
3741 * the service distribution enforced by the drive's internal
3742 * scheduler is likely to coincide with the desired throughput
3743 * distribution only in a completely symmetric, or favorably
3744 * skewed scenario where:
3745 * (i-a) each of these processes must get the same throughput as
3746 *       the others,
3747 * (i-b) in case (i-a) does not hold, it holds that the process
3748 *       associated with bfqq must receive a lower or equal
3749 *       throughput than any of the other processes;
3750 * (ii)  the I/O of each process has the same properties, in
3751 *       terms of locality (sequential or random), direction
3752 *       (reads or writes), request sizes, greediness
3753 *       (from I/O-bound to sporadic), and so on;
3754
3755 * In fact, in such a scenario, the drive tends to treat the requests
3756 * of each process in about the same way as the requests of the
3757 * others, and thus to provide each of these processes with about the
3758 * same throughput.  This is exactly the desired throughput
3759 * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
3760 * even more convenient distribution for (the process associated with)
3761 * bfqq.
3762 *
3763 * In contrast, in any asymmetric or unfavorable scenario, device
3764 * idling (I/O-dispatch plugging) is certainly needed to guarantee
3765 * that bfqq receives its assigned fraction of the device throughput
3766 * (see [1] for details).
3767 *
3768 * The problem is that idling may significantly reduce throughput with
3769 * certain combinations of types of I/O and devices. An important
3770 * example is sync random I/O on flash storage with command
3771 * queueing. So, unless bfqq falls in cases where idling also boosts
3772 * throughput, it is important to check conditions (i-a), i(-b) and
3773 * (ii) accurately, so as to avoid idling when not strictly needed for
3774 * service guarantees.
3775 *
3776 * Unfortunately, it is extremely difficult to thoroughly check
3777 * condition (ii). And, in case there are active groups, it becomes
3778 * very difficult to check conditions (i-a) and (i-b) too.  In fact,
3779 * if there are active groups, then, for conditions (i-a) or (i-b) to
3780 * become false 'indirectly', it is enough that an active group
3781 * contains more active processes or sub-groups than some other active
3782 * group. More precisely, for conditions (i-a) or (i-b) to become
3783 * false because of such a group, it is not even necessary that the
3784 * group is (still) active: it is sufficient that, even if the group
3785 * has become inactive, some of its descendant processes still have
3786 * some request already dispatched but still waiting for
3787 * completion. In fact, requests have still to be guaranteed their
3788 * share of the throughput even after being dispatched. In this
3789 * respect, it is easy to show that, if a group frequently becomes
3790 * inactive while still having in-flight requests, and if, when this
3791 * happens, the group is not considered in the calculation of whether
3792 * the scenario is asymmetric, then the group may fail to be
3793 * guaranteed its fair share of the throughput (basically because
3794 * idling may not be performed for the descendant processes of the
3795 * group, but it had to be).  We address this issue with the following
3796 * bi-modal behavior, implemented in the function
3797 * bfq_asymmetric_scenario().
3798 *
3799 * If there are groups with requests waiting for completion
3800 * (as commented above, some of these groups may even be
3801 * already inactive), then the scenario is tagged as
3802 * asymmetric, conservatively, without checking any of the
3803 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
3804 * This behavior matches also the fact that groups are created
3805 * exactly if controlling I/O is a primary concern (to
3806 * preserve bandwidth and latency guarantees).
3807 *
3808 * On the opposite end, if there are no groups with requests waiting
3809 * for completion, then only conditions (i-a) and (i-b) are actually
3810 * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
3811 * idling is not performed, regardless of whether condition (ii)
3812 * holds.  In other words, only if conditions (i-a) and (i-b) do not
3813 * hold, then idling is allowed, and the device tends to be prevented
3814 * from queueing many requests, possibly of several processes. Since
3815 * there are no groups with requests waiting for completion, then, to
3816 * control conditions (i-a) and (i-b) it is enough to check just
3817 * whether all the queues with requests waiting for completion also
3818 * have the same weight.
3819 *
3820 * Not checking condition (ii) evidently exposes bfqq to the
3821 * risk of getting less throughput than its fair share.
3822 * However, for queues with the same weight, a further
3823 * mechanism, preemption, mitigates or even eliminates this
3824 * problem. And it does so without consequences on overall
3825 * throughput. This mechanism and its benefits are explained
3826 * in the next three paragraphs.
3827 *
3828 * Even if a queue, say Q, is expired when it remains idle, Q
3829 * can still preempt the new in-service queue if the next
3830 * request of Q arrives soon (see the comments on
3831 * bfq_bfqq_update_budg_for_activation). If all queues and
3832 * groups have the same weight, this form of preemption,
3833 * combined with the hole-recovery heuristic described in the
3834 * comments on function bfq_bfqq_update_budg_for_activation,
3835 * are enough to preserve a correct bandwidth distribution in
3836 * the mid term, even without idling. In fact, even if not
3837 * idling allows the internal queues of the device to contain
3838 * many requests, and thus to reorder requests, we can rather
3839 * safely assume that the internal scheduler still preserves a
3840 * minimum of mid-term fairness.
3841 *
3842 * More precisely, this preemption-based, idleless approach
3843 * provides fairness in terms of IOPS, and not sectors per
3844 * second. This can be seen with a simple example. Suppose
3845 * that there are two queues with the same weight, but that
3846 * the first queue receives requests of 8 sectors, while the
3847 * second queue receives requests of 1024 sectors. In
3848 * addition, suppose that each of the two queues contains at
3849 * most one request at a time, which implies that each queue
3850 * always remains idle after it is served. Finally, after
3851 * remaining idle, each queue receives very quickly a new
3852 * request. It follows that the two queues are served
3853 * alternatively, preempting each other if needed. This
3854 * implies that, although both queues have the same weight,
3855 * the queue with large requests receives a service that is
3856 * 1024/8 times as high as the service received by the other
3857 * queue.
3858 *
3859 * The motivation for using preemption instead of idling (for
3860 * queues with the same weight) is that, by not idling,
3861 * service guarantees are preserved (completely or at least in
3862 * part) without minimally sacrificing throughput. And, if
3863 * there is no active group, then the primary expectation for
3864 * this device is probably a high throughput.
3865 *
3866 * We are now left only with explaining the additional
3867 * compound condition that is checked below for deciding
3868 * whether the scenario is asymmetric. To explain this
3869 * compound condition, we need to add that the function
3870 * bfq_asymmetric_scenario checks the weights of only
3871 * non-weight-raised queues, for efficiency reasons (see
3872 * comments on bfq_weights_tree_add()). Then the fact that
3873 * bfqq is weight-raised is checked explicitly here. More
3874 * precisely, the compound condition below takes into account
3875 * also the fact that, even if bfqq is being weight-raised,
3876 * the scenario is still symmetric if all queues with requests
3877 * waiting for completion happen to be
3878 * weight-raised. Actually, we should be even more precise
3879 * here, and differentiate between interactive weight raising
3880 * and soft real-time weight raising.
3881 *
3882 * As a side note, it is worth considering that the above
3883 * device-idling countermeasures may however fail in the
3884 * following unlucky scenario: if idling is (correctly)
3885 * disabled in a time period during which all symmetry
3886 * sub-conditions hold, and hence the device is allowed to
3887 * enqueue many requests, but at some later point in time some
3888 * sub-condition stops to hold, then it may become impossible
3889 * to let requests be served in the desired order until all
3890 * the requests already queued in the device have been served.
3891 */
3892static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
3893                                                 struct bfq_queue *bfqq)
3894{
3895        return (bfqq->wr_coeff > 1 &&
3896                bfqd->wr_busy_queues <
3897                bfq_tot_busy_queues(bfqd)) ||
3898                bfq_asymmetric_scenario(bfqd, bfqq);
3899}
3900
3901/*
3902 * For a queue that becomes empty, device idling is allowed only if
3903 * this function returns true for that queue. As a consequence, since
3904 * device idling plays a critical role for both throughput boosting
3905 * and service guarantees, the return value of this function plays a
3906 * critical role as well.
3907 *
3908 * In a nutshell, this function returns true only if idling is
3909 * beneficial for throughput or, even if detrimental for throughput,
3910 * idling is however necessary to preserve service guarantees (low
3911 * latency, desired throughput distribution, ...). In particular, on
3912 * NCQ-capable devices, this function tries to return false, so as to
3913 * help keep the drives' internal queues full, whenever this helps the
3914 * device boost the throughput without causing any service-guarantee
3915 * issue.
3916 *
3917 * Most of the issues taken into account to get the return value of
3918 * this function are not trivial. We discuss these issues in the two
3919 * functions providing the main pieces of information needed by this
3920 * function.
3921 */
3922static bool bfq_better_to_idle(struct bfq_queue *bfqq)
3923{
3924        struct bfq_data *bfqd = bfqq->bfqd;
3925        bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
3926
3927        if (unlikely(bfqd->strict_guarantees))
3928                return true;
3929
3930        /*
3931         * Idling is performed only if slice_idle > 0. In addition, we
3932         * do not idle if
3933         * (a) bfqq is async
3934         * (b) bfqq is in the idle io prio class: in this case we do
3935         * not idle because we want to minimize the bandwidth that
3936         * queues in this class can steal to higher-priority queues
3937         */
3938        if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3939           bfq_class_idle(bfqq))
3940                return false;
3941
3942        idling_boosts_thr_with_no_issue =
3943                idling_boosts_thr_without_issues(bfqd, bfqq);
3944
3945        idling_needed_for_service_guar =
3946                idling_needed_for_service_guarantees(bfqd, bfqq);
3947
3948        /*
3949         * We have now the two components we need to compute the
3950         * return value of the function, which is true only if idling
3951         * either boosts the throughput (without issues), or is
3952         * necessary to preserve service guarantees.
3953         */
3954        return idling_boosts_thr_with_no_issue ||
3955                idling_needed_for_service_guar;
3956}
3957
3958/*
3959 * If the in-service queue is empty but the function bfq_better_to_idle
3960 * returns true, then:
3961 * 1) the queue must remain in service and cannot be expired, and
3962 * 2) the device must be idled to wait for the possible arrival of a new
3963 *    request for the queue.
3964 * See the comments on the function bfq_better_to_idle for the reasons
3965 * why performing device idling is the best choice to boost the throughput
3966 * and preserve service guarantees when bfq_better_to_idle itself
3967 * returns true.
3968 */
3969static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3970{
3971        return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
3972}
3973
3974/*
3975 * This function chooses the queue from which to pick the next extra
3976 * I/O request to inject, if it finds a compatible queue. See the
3977 * comments on bfq_update_inject_limit() for details on the injection
3978 * mechanism, and for the definitions of the quantities mentioned
3979 * below.
3980 */
3981static struct bfq_queue *
3982bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
3983{
3984        struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
3985        unsigned int limit = in_serv_bfqq->inject_limit;
3986        /*
3987         * If
3988         * - bfqq is not weight-raised and therefore does not carry
3989         *   time-critical I/O,
3990         * or
3991         * - regardless of whether bfqq is weight-raised, bfqq has
3992         *   however a long think time, during which it can absorb the
3993         *   effect of an appropriate number of extra I/O requests
3994         *   from other queues (see bfq_update_inject_limit for
3995         *   details on the computation of this number);
3996         * then injection can be performed without restrictions.
3997         */
3998        bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
3999                !bfq_bfqq_has_short_ttime(in_serv_bfqq);
4000
4001        /*
4002         * If
4003         * - the baseline total service time could not be sampled yet,
4004         *   so the inject limit happens to be still 0, and
4005         * - a lot of time has elapsed since the plugging of I/O
4006         *   dispatching started, so drive speed is being wasted
4007         *   significantly;
4008         * then temporarily raise inject limit to one request.
4009         */
4010        if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
4011            bfq_bfqq_wait_request(in_serv_bfqq) &&
4012            time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
4013                                      bfqd->bfq_slice_idle)
4014                )
4015                limit = 1;
4016
4017        if (bfqd->rq_in_driver >= limit)
4018                return NULL;
4019
4020        /*
4021         * Linear search of the source queue for injection; but, with
4022         * a high probability, very few steps are needed to find a
4023         * candidate queue, i.e., a queue with enough budget left for
4024         * its next request. In fact:
4025         * - BFQ dynamically updates the budget of every queue so as
4026         *   to accommodate the expected backlog of the queue;
4027         * - if a queue gets all its requests dispatched as injected
4028         *   service, then the queue is removed from the active list
4029         *   (and re-added only if it gets new requests, but then it
4030         *   is assigned again enough budget for its new backlog).
4031         */
4032        list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
4033                if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
4034                    (in_serv_always_inject || bfqq->wr_coeff > 1) &&
4035                    bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
4036                    bfq_bfqq_budget_left(bfqq)) {
4037                        /*
4038                         * Allow for only one large in-flight request
4039                         * on non-rotational devices, for the
4040                         * following reason. On non-rotationl drives,
4041                         * large requests take much longer than
4042                         * smaller requests to be served. In addition,
4043                         * the drive prefers to serve large requests
4044                         * w.r.t. to small ones, if it can choose. So,
4045                         * having more than one large requests queued
4046                         * in the drive may easily make the next first
4047                         * request of the in-service queue wait for so
4048                         * long to break bfqq's service guarantees. On
4049                         * the bright side, large requests let the
4050                         * drive reach a very high throughput, even if
4051                         * there is only one in-flight large request
4052                         * at a time.
4053                         */
4054                        if (blk_queue_nonrot(bfqd->queue) &&
4055                            blk_rq_sectors(bfqq->next_rq) >=
4056                            BFQQ_SECT_THR_NONROT)
4057                                limit = min_t(unsigned int, 1, limit);
4058                        else
4059                                limit = in_serv_bfqq->inject_limit;
4060
4061                        if (bfqd->rq_in_driver < limit) {
4062                                bfqd->rqs_injected = true;
4063                                return bfqq;
4064                        }
4065                }
4066
4067        return NULL;
4068}
4069
4070/*
4071 * Select a queue for service.  If we have a current queue in service,
4072 * check whether to continue servicing it, or retrieve and set a new one.
4073 */
4074static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
4075{
4076        struct bfq_queue *bfqq;
4077        struct request *next_rq;
4078        enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
4079
4080        bfqq = bfqd->in_service_queue;
4081        if (!bfqq)
4082                goto new_queue;
4083
4084        bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
4085
4086        /*
4087         * Do not expire bfqq for budget timeout if bfqq may be about
4088         * to enjoy device idling. The reason why, in this case, we
4089         * prevent bfqq from expiring is the same as in the comments
4090         * on the case where bfq_bfqq_must_idle() returns true, in
4091         * bfq_completed_request().
4092         */
4093        if (bfq_may_expire_for_budg_timeout(bfqq) &&
4094            !bfq_bfqq_must_idle(bfqq))
4095                goto expire;
4096
4097check_queue:
4098        /*
4099         * This loop is rarely executed more than once. Even when it
4100         * happens, it is much more convenient to re-execute this loop
4101         * than to return NULL and trigger a new dispatch to get a
4102         * request served.
4103         */
4104        next_rq = bfqq->next_rq;
4105        /*
4106         * If bfqq has requests queued and it has enough budget left to
4107         * serve them, keep the queue, otherwise expire it.
4108         */
4109        if (next_rq) {
4110                if (bfq_serv_to_charge(next_rq, bfqq) >
4111                        bfq_bfqq_budget_left(bfqq)) {
4112                        /*
4113                         * Expire the queue for budget exhaustion,
4114                         * which makes sure that the next budget is
4115                         * enough to serve the next request, even if
4116                         * it comes from the fifo expired path.
4117                         */
4118                        reason = BFQQE_BUDGET_EXHAUSTED;
4119                        goto expire;
4120                } else {
4121                        /*
4122                         * The idle timer may be pending because we may
4123                         * not disable disk idling even when a new request
4124                         * arrives.
4125                         */
4126                        if (bfq_bfqq_wait_request(bfqq)) {
4127                                /*
4128                                 * If we get here: 1) at least a new request
4129                                 * has arrived but we have not disabled the
4130                                 * timer because the request was too small,
4131                                 * 2) then the block layer has unplugged
4132                                 * the device, causing the dispatch to be
4133                                 * invoked.
4134                                 *
4135                                 * Since the device is unplugged, now the
4136                                 * requests are probably large enough to
4137                                 * provide a reasonable throughput.
4138                                 * So we disable idling.
4139                                 */
4140                                bfq_clear_bfqq_wait_request(bfqq);
4141                                hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4142                        }
4143                        goto keep_queue;
4144                }
4145        }
4146
4147        /*
4148         * No requests pending. However, if the in-service queue is idling
4149         * for a new request, or has requests waiting for a completion and
4150         * may idle after their completion, then keep it anyway.
4151         *
4152         * Yet, inject service from other queues if it boosts
4153         * throughput and is possible.
4154         */
4155        if (bfq_bfqq_wait_request(bfqq) ||
4156            (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
4157                struct bfq_queue *async_bfqq =
4158                        bfqq->bic && bfqq->bic->bfqq[0] &&
4159                        bfq_bfqq_busy(bfqq->bic->bfqq[0]) ?
4160                        bfqq->bic->bfqq[0] : NULL;
4161
4162                /*
4163                 * If the process associated with bfqq has also async
4164                 * I/O pending, then inject it
4165                 * unconditionally. Injecting I/O from the same
4166                 * process can cause no harm to the process. On the
4167                 * contrary, it can only increase bandwidth and reduce
4168                 * latency for the process.
4169                 */
4170                if (async_bfqq &&
4171                    icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
4172                    bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
4173                    bfq_bfqq_budget_left(async_bfqq))
4174                        bfqq = bfqq->bic->bfqq[0];
4175                else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
4176                         (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
4177                          !bfq_bfqq_has_short_ttime(bfqq)))
4178                        bfqq = bfq_choose_bfqq_for_injection(bfqd);
4179                else
4180                        bfqq = NULL;
4181
4182                goto keep_queue;
4183        }
4184
4185        reason = BFQQE_NO_MORE_REQUESTS;
4186expire:
4187        bfq_bfqq_expire(bfqd, bfqq, false, reason);
4188new_queue:
4189        bfqq = bfq_set_in_service_queue(bfqd);
4190        if (bfqq) {
4191                bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
4192                goto check_queue;
4193        }
4194keep_queue:
4195        if (bfqq)
4196                bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
4197        else
4198                bfq_log(bfqd, "select_queue: no queue returned");
4199
4200        return bfqq;
4201}
4202
4203static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4204{
4205        struct bfq_entity *entity = &bfqq->entity;
4206
4207        if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
4208                bfq_log_bfqq(bfqd, bfqq,
4209                        "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
4210                        jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
4211                        jiffies_to_msecs(bfqq->wr_cur_max_time),
4212                        bfqq->wr_coeff,
4213                        bfqq->entity.weight, bfqq->entity.orig_weight);
4214
4215                if (entity->prio_changed)
4216                        bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
4217
4218                /*
4219                 * If the queue was activated in a burst, or too much
4220                 * time has elapsed from the beginning of this
4221                 * weight-raising period, then end weight raising.
4222                 */
4223                if (bfq_bfqq_in_large_burst(bfqq))
4224                        bfq_bfqq_end_wr(bfqq);
4225                else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
4226                                                bfqq->wr_cur_max_time)) {
4227                        if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
4228                        time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
4229                                               bfq_wr_duration(bfqd)))
4230                                bfq_bfqq_end_wr(bfqq);
4231                        else {
4232                                switch_back_to_interactive_wr(bfqq, bfqd);
4233                                bfqq->entity.prio_changed = 1;
4234                        }
4235                }
4236                if (bfqq->wr_coeff > 1 &&
4237                    bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
4238                    bfqq->service_from_wr > max_service_from_wr) {
4239                        /* see comments on max_service_from_wr */
4240                        bfq_bfqq_end_wr(bfqq);
4241                }
4242        }
4243        /*
4244         * To improve latency (for this or other queues), immediately
4245         * update weight both if it must be raised and if it must be
4246         * lowered. Since, entity may be on some active tree here, and
4247         * might have a pending change of its ioprio class, invoke
4248         * next function with the last parameter unset (see the
4249         * comments on the function).
4250         */
4251        if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
4252                __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
4253                                                entity, false);
4254}
4255
4256/*
4257 * Dispatch next request from bfqq.
4258 */
4259static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
4260                                                 struct bfq_queue *bfqq)
4261{
4262        struct request *rq = bfqq->next_rq;
4263        unsigned long service_to_charge;
4264
4265        service_to_charge = bfq_serv_to_charge(rq, bfqq);
4266
4267        bfq_bfqq_served(bfqq, service_to_charge);
4268
4269        if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
4270                bfqd->wait_dispatch = false;
4271                bfqd->waited_rq = rq;
4272        }
4273
4274        bfq_dispatch_remove(bfqd->queue, rq);
4275
4276        if (bfqq != bfqd->in_service_queue)
4277                goto return_rq;
4278
4279        /*
4280         * If weight raising has to terminate for bfqq, then next
4281         * function causes an immediate update of bfqq's weight,
4282         * without waiting for next activation. As a consequence, on
4283         * expiration, bfqq will be timestamped as if has never been
4284         * weight-raised during this service slot, even if it has
4285         * received part or even most of the service as a
4286         * weight-raised queue. This inflates bfqq's timestamps, which
4287         * is beneficial, as bfqq is then more willing to leave the
4288         * device immediately to possible other weight-raised queues.
4289         */
4290        bfq_update_wr_data(bfqd, bfqq);
4291
4292        /*
4293         * Expire bfqq, pretending that its budget expired, if bfqq
4294         * belongs to CLASS_IDLE and other queues are waiting for
4295         * service.
4296         */
4297        if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
4298                goto return_rq;
4299
4300        bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
4301
4302return_rq:
4303        return rq;
4304}
4305
4306static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
4307{
4308        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4309
4310        /*
4311         * Avoiding lock: a race on bfqd->busy_queues should cause at
4312         * most a call to dispatch for nothing
4313         */
4314        return !list_empty_careful(&bfqd->dispatch) ||
4315                bfq_tot_busy_queues(bfqd) > 0;
4316}
4317
4318static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
4319{
4320        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4321        struct request *rq = NULL;
4322        struct bfq_queue *bfqq = NULL;
4323
4324        if (!list_empty(&bfqd->dispatch)) {
4325                rq = list_first_entry(&bfqd->dispatch, struct request,
4326                                      queuelist);
4327                list_del_init(&rq->queuelist);
4328
4329                bfqq = RQ_BFQQ(rq);
4330
4331                if (bfqq) {
4332                        /*
4333                         * Increment counters here, because this
4334                         * dispatch does not follow the standard
4335                         * dispatch flow (where counters are
4336                         * incremented)
4337                         */
4338                        bfqq->dispatched++;
4339
4340                        goto inc_in_driver_start_rq;
4341                }
4342
4343                /*
4344                 * We exploit the bfq_finish_requeue_request hook to
4345                 * decrement rq_in_driver, but
4346                 * bfq_finish_requeue_request will not be invoked on
4347                 * this request. So, to avoid unbalance, just start
4348                 * this request, without incrementing rq_in_driver. As
4349                 * a negative consequence, rq_in_driver is deceptively
4350                 * lower than it should be while this request is in
4351                 * service. This may cause bfq_schedule_dispatch to be
4352                 * invoked uselessly.
4353                 *
4354                 * As for implementing an exact solution, the
4355                 * bfq_finish_requeue_request hook, if defined, is
4356                 * probably invoked also on this request. So, by
4357                 * exploiting this hook, we could 1) increment
4358                 * rq_in_driver here, and 2) decrement it in
4359                 * bfq_finish_requeue_request. Such a solution would
4360                 * let the value of the counter be always accurate,
4361                 * but it would entail using an extra interface
4362                 * function. This cost seems higher than the benefit,
4363                 * being the frequency of non-elevator-private
4364                 * requests very low.
4365                 */
4366                goto start_rq;
4367        }
4368
4369        bfq_log(bfqd, "dispatch requests: %d busy queues",
4370                bfq_tot_busy_queues(bfqd));
4371
4372        if (bfq_tot_busy_queues(bfqd) == 0)
4373                goto exit;
4374
4375        /*
4376         * Force device to serve one request at a time if
4377         * strict_guarantees is true. Forcing this service scheme is
4378         * currently the ONLY way to guarantee that the request
4379         * service order enforced by the scheduler is respected by a
4380         * queueing device. Otherwise the device is free even to make
4381         * some unlucky request wait for as long as the device
4382         * wishes.
4383         *
4384         * Of course, serving one request at at time may cause loss of
4385         * throughput.
4386         */
4387        if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
4388                goto exit;
4389
4390        bfqq = bfq_select_queue(bfqd);
4391        if (!bfqq)
4392                goto exit;
4393
4394        rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
4395
4396        if (rq) {
4397inc_in_driver_start_rq:
4398                bfqd->rq_in_driver++;
4399start_rq:
4400                rq->rq_flags |= RQF_STARTED;
4401        }
4402exit:
4403        return rq;
4404}
4405
4406#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
4407static void bfq_update_dispatch_stats(struct request_queue *q,
4408                                      struct request *rq,
4409                                      struct bfq_queue *in_serv_queue,
4410                                      bool idle_timer_disabled)
4411{
4412        struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
4413
4414        if (!idle_timer_disabled && !bfqq)
4415                return;
4416
4417        /*
4418         * rq and bfqq are guaranteed to exist until this function
4419         * ends, for the following reasons. First, rq can be
4420         * dispatched to the device, and then can be completed and
4421         * freed, only after this function ends. Second, rq cannot be
4422         * merged (and thus freed because of a merge) any longer,
4423         * because it has already started. Thus rq cannot be freed
4424         * before this function ends, and, since rq has a reference to
4425         * bfqq, the same guarantee holds for bfqq too.
4426         *
4427         * In addition, the following queue lock guarantees that
4428         * bfqq_group(bfqq) exists as well.
4429         */
4430        spin_lock_irq(&q->queue_lock);
4431        if (idle_timer_disabled)
4432                /*
4433                 * Since the idle timer has been disabled,
4434                 * in_serv_queue contained some request when
4435                 * __bfq_dispatch_request was invoked above, which
4436                 * implies that rq was picked exactly from
4437                 * in_serv_queue. Thus in_serv_queue == bfqq, and is
4438                 * therefore guaranteed to exist because of the above
4439                 * arguments.
4440                 */
4441                bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
4442        if (bfqq) {
4443                struct bfq_group *bfqg = bfqq_group(bfqq);
4444
4445                bfqg_stats_update_avg_queue_size(bfqg);
4446                bfqg_stats_set_start_empty_time(bfqg);
4447                bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
4448        }
4449        spin_unlock_irq(&q->queue_lock);
4450}
4451#else
4452static inline void bfq_update_dispatch_stats(struct request_queue *q,
4453                                             struct request *rq,
4454                                             struct bfq_queue *in_serv_queue,
4455                                             bool idle_timer_disabled) {}
4456#endif
4457
4458static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
4459{
4460        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4461        struct request *rq;
4462        struct bfq_queue *in_serv_queue;
4463        bool waiting_rq, idle_timer_disabled;
4464
4465        spin_lock_irq(&bfqd->lock);
4466
4467        in_serv_queue = bfqd->in_service_queue;
4468        waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
4469
4470        rq = __bfq_dispatch_request(hctx);
4471
4472        idle_timer_disabled =
4473                waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
4474
4475        spin_unlock_irq(&bfqd->lock);
4476
4477        bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
4478                                  idle_timer_disabled);
4479
4480        return rq;
4481}
4482
4483/*
4484 * Task holds one reference to the queue, dropped when task exits.  Each rq
4485 * in-flight on this queue also holds a reference, dropped when rq is freed.
4486 *
4487 * Scheduler lock must be held here. Recall not to use bfqq after calling
4488 * this function on it.
4489 */
4490void bfq_put_queue(struct bfq_queue *bfqq)
4491{
4492#ifdef CONFIG_BFQ_GROUP_IOSCHED
4493        struct bfq_group *bfqg = bfqq_group(bfqq);
4494#endif
4495
4496        if (bfqq->bfqd)
4497                bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
4498                             bfqq, bfqq->ref);
4499
4500        bfqq->ref--;
4501        if (bfqq->ref)
4502                return;
4503
4504        if (!hlist_unhashed(&bfqq->burst_list_node)) {
4505                hlist_del_init(&bfqq->burst_list_node);
4506                /*
4507                 * Decrement also burst size after the removal, if the
4508                 * process associated with bfqq is exiting, and thus
4509                 * does not contribute to the burst any longer. This
4510                 * decrement helps filter out false positives of large
4511                 * bursts, when some short-lived process (often due to
4512                 * the execution of commands by some service) happens
4513                 * to start and exit while a complex application is
4514                 * starting, and thus spawning several processes that
4515                 * do I/O (and that *must not* be treated as a large
4516                 * burst, see comments on bfq_handle_burst).
4517                 *
4518                 * In particular, the decrement is performed only if:
4519                 * 1) bfqq is not a merged queue, because, if it is,
4520                 * then this free of bfqq is not triggered by the exit
4521                 * of the process bfqq is associated with, but exactly
4522                 * by the fact that bfqq has just been merged.
4523                 * 2) burst_size is greater than 0, to handle
4524                 * unbalanced decrements. Unbalanced decrements may
4525                 * happen in te following case: bfqq is inserted into
4526                 * the current burst list--without incrementing
4527                 * bust_size--because of a split, but the current
4528                 * burst list is not the burst list bfqq belonged to
4529                 * (see comments on the case of a split in
4530                 * bfq_set_request).
4531                 */
4532                if (bfqq->bic && bfqq->bfqd->burst_size > 0)
4533                        bfqq->bfqd->burst_size--;
4534        }
4535
4536        kmem_cache_free(bfq_pool, bfqq);
4537#ifdef CONFIG_BFQ_GROUP_IOSCHED
4538        bfqg_and_blkg_put(bfqg);
4539#endif
4540}
4541
4542static void bfq_put_cooperator(struct bfq_queue *bfqq)
4543{
4544        struct bfq_queue *__bfqq, *next;
4545
4546        /*
4547         * If this queue was scheduled to merge with another queue, be
4548         * sure to drop the reference taken on that queue (and others in
4549         * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
4550         */
4551        __bfqq = bfqq->new_bfqq;
4552        while (__bfqq) {
4553                if (__bfqq == bfqq)
4554                        break;
4555                next = __bfqq->new_bfqq;
4556                bfq_put_queue(__bfqq);
4557                __bfqq = next;
4558        }
4559}
4560
4561static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4562{
4563        if (bfqq == bfqd->in_service_queue) {
4564                __bfq_bfqq_expire(bfqd, bfqq);
4565                bfq_schedule_dispatch(bfqd);
4566        }
4567
4568        bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
4569
4570        bfq_put_cooperator(bfqq);
4571
4572        bfq_put_queue(bfqq); /* release process reference */
4573}
4574
4575static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
4576{
4577        struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4578        struct bfq_data *bfqd;
4579
4580        if (bfqq)
4581                bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
4582
4583        if (bfqq && bfqd) {
4584                unsigned long flags;
4585
4586                spin_lock_irqsave(&bfqd->lock, flags);
4587                bfq_exit_bfqq(bfqd, bfqq);
4588                bic_set_bfqq(bic, NULL, is_sync);
4589                spin_unlock_irqrestore(&bfqd->lock, flags);
4590        }
4591}
4592
4593static void bfq_exit_icq(struct io_cq *icq)
4594{
4595        struct bfq_io_cq *bic = icq_to_bic(icq);
4596
4597        bfq_exit_icq_bfqq(bic, true);
4598        bfq_exit_icq_bfqq(bic, false);
4599}
4600
4601/*
4602 * Update the entity prio values; note that the new values will not
4603 * be used until the next (re)activation.
4604 */
4605static void
4606bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
4607{
4608        struct task_struct *tsk = current;
4609        int ioprio_class;
4610        struct bfq_data *bfqd = bfqq->bfqd;
4611
4612        if (!bfqd)
4613                return;
4614
4615        ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
4616        switch (ioprio_class) {
4617        default:
4618                dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
4619                        "bfq: bad prio class %d\n", ioprio_class);
4620                /* fall through */
4621        case IOPRIO_CLASS_NONE:
4622                /*
4623                 * No prio set, inherit CPU scheduling settings.
4624                 */
4625                bfqq->new_ioprio = task_nice_ioprio(tsk);
4626                bfqq->new_ioprio_class = task_nice_ioclass(tsk);
4627                break;
4628        case IOPRIO_CLASS_RT:
4629                bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
4630                bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
4631                break;
4632        case IOPRIO_CLASS_BE:
4633                bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
4634                bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
4635                break;
4636        case IOPRIO_CLASS_IDLE:
4637                bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
4638                bfqq->new_ioprio = 7;
4639                break;
4640        }
4641
4642        if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
4643                pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
4644                        bfqq->new_ioprio);
4645                bfqq->new_ioprio = IOPRIO_BE_NR;
4646        }
4647
4648        bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
4649        bfqq->entity.prio_changed = 1;
4650}
4651
4652static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
4653                                       struct bio *bio, bool is_sync,
4654                                       struct bfq_io_cq *bic);
4655
4656static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
4657{
4658        struct bfq_data *bfqd = bic_to_bfqd(bic);
4659        struct bfq_queue *bfqq;
4660        int ioprio = bic->icq.ioc->ioprio;
4661
4662        /*
4663         * This condition may trigger on a newly created bic, be sure to
4664         * drop the lock before returning.
4665         */
4666        if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
4667                return;
4668
4669        bic->ioprio = ioprio;
4670
4671        bfqq = bic_to_bfqq(bic, false);
4672        if (bfqq) {
4673                /* release process reference on this queue */
4674                bfq_put_queue(bfqq);
4675                bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
4676                bic_set_bfqq(bic, bfqq, false);
4677        }
4678
4679        bfqq = bic_to_bfqq(bic, true);
4680        if (bfqq)
4681                bfq_set_next_ioprio_data(bfqq, bic);
4682}
4683
4684static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4685                          struct bfq_io_cq *bic, pid_t pid, int is_sync)
4686{
4687        RB_CLEAR_NODE(&bfqq->entity.rb_node);
4688        INIT_LIST_HEAD(&bfqq->fifo);
4689        INIT_HLIST_NODE(&bfqq->burst_list_node);
4690
4691        bfqq->ref = 0;
4692        bfqq->bfqd = bfqd;
4693
4694        if (bic)
4695                bfq_set_next_ioprio_data(bfqq, bic);
4696
4697        if (is_sync) {
4698                /*
4699                 * No need to mark as has_short_ttime if in
4700                 * idle_class, because no device idling is performed
4701                 * for queues in idle class
4702                 */
4703                if (!bfq_class_idle(bfqq))
4704                        /* tentatively mark as has_short_ttime */
4705                        bfq_mark_bfqq_has_short_ttime(bfqq);
4706                bfq_mark_bfqq_sync(bfqq);
4707                bfq_mark_bfqq_just_created(bfqq);
4708        } else
4709                bfq_clear_bfqq_sync(bfqq);
4710
4711        /* set end request to minus infinity from now */
4712        bfqq->ttime.last_end_request = ktime_get_ns() + 1;
4713
4714        bfq_mark_bfqq_IO_bound(bfqq);
4715
4716        bfqq->pid = pid;
4717
4718        /* Tentative initial value to trade off between thr and lat */
4719        bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
4720        bfqq->budget_timeout = bfq_smallest_from_now();
4721
4722        bfqq->wr_coeff = 1;
4723        bfqq->last_wr_start_finish = jiffies;
4724        bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
4725        bfqq->split_time = bfq_smallest_from_now();
4726
4727        /*
4728         * To not forget the possibly high bandwidth consumed by a
4729         * process/queue in the recent past,
4730         * bfq_bfqq_softrt_next_start() returns a value at least equal
4731         * to the current value of bfqq->soft_rt_next_start (see
4732         * comments on bfq_bfqq_softrt_next_start).  Set
4733         * soft_rt_next_start to now, to mean that bfqq has consumed
4734         * no bandwidth so far.
4735         */
4736        bfqq->soft_rt_next_start = jiffies;
4737
4738        /* first request is almost certainly seeky */
4739        bfqq->seek_history = 1;
4740}
4741
4742static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
4743                                               struct bfq_group *bfqg,
4744                                               int ioprio_class, int ioprio)
4745{
4746        switch (ioprio_class) {
4747        case IOPRIO_CLASS_RT:
4748                return &bfqg->async_bfqq[0][ioprio];
4749        case IOPRIO_CLASS_NONE:
4750                ioprio = IOPRIO_NORM;
4751                /* fall through */
4752        case IOPRIO_CLASS_BE:
4753                return &bfqg->async_bfqq[1][ioprio];
4754        case IOPRIO_CLASS_IDLE:
4755                return &bfqg->async_idle_bfqq;
4756        default:
4757                return NULL;
4758        }
4759}
4760
4761static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
4762                                       struct bio *bio, bool is_sync,
4763                                       struct bfq_io_cq *bic)
4764{
4765        const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
4766        const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
4767        struct bfq_queue **async_bfqq = NULL;
4768        struct bfq_queue *bfqq;
4769        struct bfq_group *bfqg;
4770
4771        rcu_read_lock();
4772
4773        bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
4774        if (!bfqg) {
4775                bfqq = &bfqd->oom_bfqq;
4776                goto out;
4777        }
4778
4779        if (!is_sync) {
4780                async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
4781                                                  ioprio);
4782                bfqq = *async_bfqq;
4783                if (bfqq)
4784                        goto out;
4785        }
4786
4787        bfqq = kmem_cache_alloc_node(bfq_pool,
4788                                     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
4789                                     bfqd->queue->node);
4790
4791        if (bfqq) {
4792                bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
4793                              is_sync);
4794                bfq_init_entity(&bfqq->entity, bfqg);
4795                bfq_log_bfqq(bfqd, bfqq, "allocated");
4796        } else {
4797                bfqq = &bfqd->oom_bfqq;
4798                bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
4799                goto out;
4800        }
4801
4802        /*
4803         * Pin the queue now that it's allocated, scheduler exit will
4804         * prune it.
4805         */
4806        if (async_bfqq) {
4807                bfqq->ref++; /*
4808                              * Extra group reference, w.r.t. sync
4809                              * queue. This extra reference is removed
4810                              * only if bfqq->bfqg disappears, to
4811                              * guarantee that this queue is not freed
4812                              * until its group goes away.
4813                              */
4814                bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
4815                             bfqq, bfqq->ref);
4816                *async_bfqq = bfqq;
4817        }
4818
4819out:
4820        bfqq->ref++; /* get a process reference to this queue */
4821        bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
4822        rcu_read_unlock();
4823        return bfqq;
4824}
4825
4826static void bfq_update_io_thinktime(struct bfq_data *bfqd,
4827                                    struct bfq_queue *bfqq)
4828{
4829        struct bfq_ttime *ttime = &bfqq->ttime;
4830        u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
4831
4832        elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
4833
4834        ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
4835        ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
4836        ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
4837                                     ttime->ttime_samples);
4838}
4839
4840static void
4841bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4842                       struct request *rq)
4843{
4844        bfqq->seek_history <<= 1;
4845        bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
4846
4847        if (bfqq->wr_coeff > 1 &&
4848            bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
4849            BFQQ_TOTALLY_SEEKY(bfqq))
4850                bfq_bfqq_end_wr(bfqq);
4851}
4852
4853static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4854                                       struct bfq_queue *bfqq,
4855                                       struct bfq_io_cq *bic)
4856{
4857        bool has_short_ttime = true;
4858
4859        /*
4860         * No need to update has_short_ttime if bfqq is async or in
4861         * idle io prio class, or if bfq_slice_idle is zero, because
4862         * no device idling is performed for bfqq in this case.
4863         */
4864        if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4865            bfqd->bfq_slice_idle == 0)
4866                return;
4867
4868        /* Idle window just restored, statistics are meaningless. */
4869        if (time_is_after_eq_jiffies(bfqq->split_time +
4870                                     bfqd->bfq_wr_min_idle_time))
4871                return;
4872
4873        /* Think time is infinite if no process is linked to
4874         * bfqq. Otherwise check average think time to
4875         * decide whether to mark as has_short_ttime
4876         */
4877        if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
4878            (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4879             bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4880                has_short_ttime = false;
4881
4882        bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4883                     has_short_ttime);
4884
4885        if (has_short_ttime)
4886                bfq_mark_bfqq_has_short_ttime(bfqq);
4887        else
4888                bfq_clear_bfqq_has_short_ttime(bfqq);
4889}
4890
4891/*
4892 * Called when a new fs request (rq) is added to bfqq.  Check if there's
4893 * something we should do about it.
4894 */
4895static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4896                            struct request *rq)
4897{
4898        struct bfq_io_cq *bic = RQ_BIC(rq);
4899
4900        if (rq->cmd_flags & REQ_META)
4901                bfqq->meta_pending++;
4902
4903        bfq_update_io_thinktime(bfqd, bfqq);
4904        bfq_update_has_short_ttime(bfqd, bfqq, bic);
4905        bfq_update_io_seektime(bfqd, bfqq, rq);
4906
4907        bfq_log_bfqq(bfqd, bfqq,
4908                     "rq_enqueued: has_short_ttime=%d (seeky %d)",
4909                     bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
4910
4911        bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4912
4913        if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4914                bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4915                                 blk_rq_sectors(rq) < 32;
4916                bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4917
4918                /*
4919                 * There is just this request queued: if
4920                 * - the request is small, and
4921                 * - we are idling to boost throughput, and
4922                 * - the queue is not to be expired,
4923                 * then just exit.
4924                 *
4925                 * In this way, if the device is being idled to wait
4926                 * for a new request from the in-service queue, we
4927                 * avoid unplugging the device and committing the
4928                 * device to serve just a small request. In contrast
4929                 * we wait for the block layer to decide when to
4930                 * unplug the device: hopefully, new requests will be
4931                 * merged to this one quickly, then the device will be
4932                 * unplugged and larger requests will be dispatched.
4933                 */
4934                if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
4935                    !budget_timeout)
4936                        return;
4937
4938                /*
4939                 * A large enough request arrived, or idling is being
4940                 * performed to preserve service guarantees, or
4941                 * finally the queue is to be expired: in all these
4942                 * cases disk idling is to be stopped, so clear
4943                 * wait_request flag and reset timer.
4944                 */
4945                bfq_clear_bfqq_wait_request(bfqq);
4946                hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4947
4948                /*
4949                 * The queue is not empty, because a new request just
4950                 * arrived. Hence we can safely expire the queue, in
4951                 * case of budget timeout, without risking that the
4952                 * timestamps of the queue are not updated correctly.
4953                 * See [1] for more details.
4954                 */
4955                if (budget_timeout)
4956                        bfq_bfqq_expire(bfqd, bfqq, false,
4957                                        BFQQE_BUDGET_TIMEOUT);
4958        }
4959}
4960
4961/* returns true if it causes the idle timer to be disabled */
4962static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4963{
4964        struct bfq_queue *bfqq = RQ_BFQQ(rq),
4965                *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4966        bool waiting, idle_timer_disabled = false;
4967
4968        if (new_bfqq) {
4969                /*
4970                 * Release the request's reference to the old bfqq
4971                 * and make sure one is taken to the shared queue.
4972                 */
4973                new_bfqq->allocated++;
4974                bfqq->allocated--;
4975                new_bfqq->ref++;
4976                /*
4977                 * If the bic associated with the process
4978                 * issuing this request still points to bfqq
4979                 * (and thus has not been already redirected
4980                 * to new_bfqq or even some other bfq_queue),
4981                 * then complete the merge and redirect it to
4982                 * new_bfqq.
4983                 */
4984                if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4985                        bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4986                                        bfqq, new_bfqq);
4987
4988                bfq_clear_bfqq_just_created(bfqq);
4989                /*
4990                 * rq is about to be enqueued into new_bfqq,
4991                 * release rq reference on bfqq
4992                 */
4993                bfq_put_queue(bfqq);
4994                rq->elv.priv[1] = new_bfqq;
4995                bfqq = new_bfqq;
4996        }
4997
4998        waiting = bfqq && bfq_bfqq_wait_request(bfqq);
4999        bfq_add_request(rq);
5000        idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
5001
5002        rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
5003        list_add_tail(&rq->queuelist, &bfqq->fifo);
5004
5005        bfq_rq_enqueued(bfqd, bfqq, rq);
5006
5007        return idle_timer_disabled;
5008}
5009
5010#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
5011static void bfq_update_insert_stats(struct request_queue *q,
5012                                    struct bfq_queue *bfqq,
5013                                    bool idle_timer_disabled,
5014                                    unsigned int cmd_flags)
5015{
5016        if (!bfqq)
5017                return;
5018
5019        /*
5020         * bfqq still exists, because it can disappear only after
5021         * either it is merged with another queue, or the process it
5022         * is associated with exits. But both actions must be taken by
5023         * the same process currently executing this flow of
5024         * instructions.
5025         *
5026         * In addition, the following queue lock guarantees that
5027         * bfqq_group(bfqq) exists as well.
5028         */
5029        spin_lock_irq(&q->queue_lock);
5030        bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
5031        if (idle_timer_disabled)
5032                bfqg_stats_update_idle_time(bfqq_group(bfqq));
5033        spin_unlock_irq(&q->queue_lock);
5034}
5035#else
5036static inline void bfq_update_insert_stats(struct request_queue *q,
5037                                           struct bfq_queue *bfqq,
5038                                           bool idle_timer_disabled,
5039                                           unsigned int cmd_flags) {}
5040#endif
5041
5042static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
5043                               bool at_head)
5044{
5045        struct request_queue *q = hctx->queue;
5046        struct bfq_data *bfqd = q->elevator->elevator_data;
5047        struct bfq_queue *bfqq;
5048        bool idle_timer_disabled = false;
5049        unsigned int cmd_flags;
5050
5051        spin_lock_irq(&bfqd->lock);
5052        if (blk_mq_sched_try_insert_merge(q, rq)) {
5053                spin_unlock_irq(&bfqd->lock);
5054                return;
5055        }
5056
5057        spin_unlock_irq(&bfqd->lock);
5058
5059        blk_mq_sched_request_inserted(rq);
5060
5061        spin_lock_irq(&bfqd->lock);
5062        bfqq = bfq_init_rq(rq);
5063        if (at_head || blk_rq_is_passthrough(rq)) {
5064                if (at_head)
5065                        list_add(&rq->queuelist, &bfqd->dispatch);
5066                else
5067                        list_add_tail(&rq->queuelist, &bfqd->dispatch);
5068        } else { /* bfqq is assumed to be non null here */
5069                idle_timer_disabled = __bfq_insert_request(bfqd, rq);
5070                /*
5071                 * Update bfqq, because, if a queue merge has occurred
5072                 * in __bfq_insert_request, then rq has been
5073                 * redirected into a new queue.
5074                 */
5075                bfqq = RQ_BFQQ(rq);
5076
5077                if (rq_mergeable(rq)) {
5078                        elv_rqhash_add(q, rq);
5079                        if (!q->last_merge)
5080                                q->last_merge = rq;
5081                }
5082        }
5083
5084        /*
5085         * Cache cmd_flags before releasing scheduler lock, because rq
5086         * may disappear afterwards (for example, because of a request
5087         * merge).
5088         */
5089        cmd_flags = rq->cmd_flags;
5090
5091        spin_unlock_irq(&bfqd->lock);
5092
5093        bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
5094                                cmd_flags);
5095}
5096
5097static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
5098                                struct list_head *list, bool at_head)
5099{
5100        while (!list_empty(list)) {
5101                struct request *rq;
5102
5103                rq = list_first_entry(list, struct request, queuelist);
5104                list_del_init(&rq->queuelist);
5105                bfq_insert_request(hctx, rq, at_head);
5106        }
5107}
5108
5109static void bfq_update_hw_tag(struct bfq_data *bfqd)
5110{
5111        struct bfq_queue *bfqq = bfqd->in_service_queue;
5112
5113        bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
5114                                       bfqd->rq_in_driver);
5115
5116        if (bfqd->hw_tag == 1)
5117                return;
5118
5119        /*
5120         * This sample is valid if the number of outstanding requests
5121         * is large enough to allow a queueing behavior.  Note that the
5122         * sum is not exact, as it's not taking into account deactivated
5123         * requests.
5124         */
5125        if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
5126                return;
5127
5128        /*
5129         * If active queue hasn't enough requests and can idle, bfq might not
5130         * dispatch sufficient requests to hardware. Don't zero hw_tag in this
5131         * case
5132         */
5133        if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
5134            bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
5135            BFQ_HW_QUEUE_THRESHOLD &&
5136            bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
5137                return;
5138
5139        if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
5140                return;
5141
5142        bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
5143        bfqd->max_rq_in_driver = 0;
5144        bfqd->hw_tag_samples = 0;
5145
5146        bfqd->nonrot_with_queueing =
5147                blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
5148}
5149
5150static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
5151{
5152        u64 now_ns;
5153        u32 delta_us;
5154
5155        bfq_update_hw_tag(bfqd);
5156
5157        bfqd->rq_in_driver--;
5158        bfqq->dispatched--;
5159
5160        if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
5161                /*
5162                 * Set budget_timeout (which we overload to store the
5163                 * time at which the queue remains with no backlog and
5164                 * no outstanding request; used by the weight-raising
5165                 * mechanism).
5166                 */
5167                bfqq->budget_timeout = jiffies;
5168
5169                bfq_weights_tree_remove(bfqd, bfqq);
5170        }
5171
5172        now_ns = ktime_get_ns();
5173
5174        bfqq->ttime.last_end_request = now_ns;
5175
5176        /*
5177         * Using us instead of ns, to get a reasonable precision in
5178         * computing rate in next check.
5179         */
5180        delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
5181
5182        /*
5183         * If the request took rather long to complete, and, according
5184         * to the maximum request size recorded, this completion latency
5185         * implies that the request was certainly served at a very low
5186         * rate (less than 1M sectors/sec), then the whole observation
5187         * interval that lasts up to this time instant cannot be a
5188         * valid time interval for computing a new peak rate.  Invoke
5189         * bfq_update_rate_reset to have the following three steps
5190         * taken:
5191         * - close the observation interval at the last (previous)
5192         *   request dispatch or completion
5193         * - compute rate, if possible, for that observation interval
5194         * - reset to zero samples, which will trigger a proper
5195         *   re-initialization of the observation interval on next
5196         *   dispatch
5197         */
5198        if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
5199           (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
5200                        1UL<<(BFQ_RATE_SHIFT - 10))
5201                bfq_update_rate_reset(bfqd, NULL);
5202        bfqd->last_completion = now_ns;
5203
5204        /*
5205         * If we are waiting to discover whether the request pattern
5206         * of the task associated with the queue is actually
5207         * isochronous, and both requisites for this condition to hold
5208         * are now satisfied, then compute soft_rt_next_start (see the
5209         * comments on the function bfq_bfqq_softrt_next_start()). We
5210         * do not compute soft_rt_next_start if bfqq is in interactive
5211         * weight raising (see the comments in bfq_bfqq_expire() for
5212         * an explanation). We schedule this delayed update when bfqq
5213         * expires, if it still has in-flight requests.
5214         */
5215        if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
5216            RB_EMPTY_ROOT(&bfqq->sort_list) &&
5217            bfqq->wr_coeff != bfqd->bfq_wr_coeff)
5218                bfqq->soft_rt_next_start =
5219                        bfq_bfqq_softrt_next_start(bfqd, bfqq);
5220
5221        /*
5222         * If this is the in-service queue, check if it needs to be expired,
5223         * or if we want to idle in case it has no pending requests.
5224         */
5225        if (bfqd->in_service_queue == bfqq) {
5226                if (bfq_bfqq_must_idle(bfqq)) {
5227                        if (bfqq->dispatched == 0)
5228                                bfq_arm_slice_timer(bfqd);
5229                        /*
5230                         * If we get here, we do not expire bfqq, even
5231                         * if bfqq was in budget timeout or had no
5232                         * more requests (as controlled in the next
5233                         * conditional instructions). The reason for
5234                         * not expiring bfqq is as follows.
5235                         *
5236                         * Here bfqq->dispatched > 0 holds, but
5237                         * bfq_bfqq_must_idle() returned true. This
5238                         * implies that, even if no request arrives
5239                         * for bfqq before bfqq->dispatched reaches 0,
5240                         * bfqq will, however, not be expired on the
5241                         * completion event that causes bfqq->dispatch
5242                         * to reach zero. In contrast, on this event,
5243                         * bfqq will start enjoying device idling
5244                         * (I/O-dispatch plugging).
5245                         *
5246                         * But, if we expired bfqq here, bfqq would
5247                         * not have the chance to enjoy device idling
5248                         * when bfqq->dispatched finally reaches
5249                         * zero. This would expose bfqq to violation
5250                         * of its reserved service guarantees.
5251                         */
5252                        return;
5253                } else if (bfq_may_expire_for_budg_timeout(bfqq))
5254                        bfq_bfqq_expire(bfqd, bfqq, false,
5255                                        BFQQE_BUDGET_TIMEOUT);
5256                else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
5257                         (bfqq->dispatched == 0 ||
5258                          !bfq_better_to_idle(bfqq)))
5259                        bfq_bfqq_expire(bfqd, bfqq, false,
5260                                        BFQQE_NO_MORE_REQUESTS);
5261        }
5262
5263        if (!bfqd->rq_in_driver)
5264                bfq_schedule_dispatch(bfqd);
5265}
5266
5267static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
5268{
5269        bfqq->allocated--;
5270
5271        bfq_put_queue(bfqq);
5272}
5273
5274/*
5275 * The processes associated with bfqq may happen to generate their
5276 * cumulative I/O at a lower rate than the rate at which the device
5277 * could serve the same I/O. This is rather probable, e.g., if only
5278 * one process is associated with bfqq and the device is an SSD. It
5279 * results in bfqq becoming often empty while in service. In this
5280 * respect, if BFQ is allowed to switch to another queue when bfqq
5281 * remains empty, then the device goes on being fed with I/O requests,
5282 * and the throughput is not affected. In contrast, if BFQ is not
5283 * allowed to switch to another queue---because bfqq is sync and
5284 * I/O-dispatch needs to be plugged while bfqq is temporarily
5285 * empty---then, during the service of bfqq, there will be frequent
5286 * "service holes", i.e., time intervals during which bfqq gets empty
5287 * and the device can only consume the I/O already queued in its
5288 * hardware queues. During service holes, the device may even get to
5289 * remaining idle. In the end, during the service of bfqq, the device
5290 * is driven at a lower speed than the one it can reach with the kind
5291 * of I/O flowing through bfqq.
5292 *
5293 * To counter this loss of throughput, BFQ implements a "request
5294 * injection mechanism", which tries to fill the above service holes
5295 * with I/O requests taken from other queues. The hard part in this
5296 * mechanism is finding the right amount of I/O to inject, so as to
5297 * both boost throughput and not break bfqq's bandwidth and latency
5298 * guarantees. In this respect, the mechanism maintains a per-queue
5299 * inject limit, computed as below. While bfqq is empty, the injection
5300 * mechanism dispatches extra I/O requests only until the total number
5301 * of I/O requests in flight---i.e., already dispatched but not yet
5302 * completed---remains lower than this limit.
5303 *
5304 * A first definition comes in handy to introduce the algorithm by
5305 * which the inject limit is computed.  We define as first request for
5306 * bfqq, an I/O request for bfqq that arrives while bfqq is in
5307 * service, and causes bfqq to switch from empty to non-empty. The
5308 * algorithm updates the limit as a function of the effect of
5309 * injection on the service times of only the first requests of
5310 * bfqq. The reason for this restriction is that these are the
5311 * requests whose service time is affected most, because they are the
5312 * first to arrive after injection possibly occurred.
5313 *
5314 * To evaluate the effect of injection, the algorithm measures the
5315 * "total service time" of first requests. We define as total service
5316 * time of an I/O request, the time that elapses since when the
5317 * request is enqueued into bfqq, to when it is completed. This
5318 * quantity allows the whole effect of injection to be measured. It is
5319 * easy to see why. Suppose that some requests of other queues are
5320 * actually injected while bfqq is empty, and that a new request R
5321 * then arrives for bfqq. If the device does start to serve all or
5322 * part of the injected requests during the service hole, then,
5323 * because of this extra service, it may delay the next invocation of
5324 * the dispatch hook of BFQ. Then, even after R gets eventually
5325 * dispatched, the device may delay the actual service of R if it is
5326 * still busy serving the extra requests, or if it decides to serve,
5327 * before R, some extra request still present in its queues. As a
5328 * conclusion, the cumulative extra delay caused by injection can be
5329 * easily evaluated by just comparing the total service time of first
5330 * requests with and without injection.
5331 *
5332 * The limit-update algorithm works as follows. On the arrival of a
5333 * first request of bfqq, the algorithm measures the total time of the
5334 * request only if one of the three cases below holds, and, for each
5335 * case, it updates the limit as described below:
5336 *
5337 * (1) If there is no in-flight request. This gives a baseline for the
5338 *     total service time of the requests of bfqq. If the baseline has
5339 *     not been computed yet, then, after computing it, the limit is
5340 *     set to 1, to start boosting throughput, and to prepare the
5341 *     ground for the next case. If the baseline has already been
5342 *     computed, then it is updated, in case it results to be lower
5343 *     than the previous value.
5344 *
5345 * (2) If the limit is higher than 0 and there are in-flight
5346 *     requests. By comparing the total service time in this case with
5347 *     the above baseline, it is possible to know at which extent the
5348 *     current value of the limit is inflating the total service
5349 *     time. If the inflation is below a certain threshold, then bfqq
5350 *     is assumed to be suffering from no perceivable loss of its
5351 *     service guarantees, and the limit is even tentatively
5352 *     increased. If the inflation is above the threshold, then the
5353 *     limit is decreased. Due to the lack of any hysteresis, this
5354 *     logic makes the limit oscillate even in steady workload
5355 *     conditions. Yet we opted for it, because it is fast in reaching
5356 *     the best value for the limit, as a function of the current I/O
5357 *     workload. To reduce oscillations, this step is disabled for a
5358 *     short time interval after the limit happens to be decreased.
5359 *
5360 * (3) Periodically, after resetting the limit, to make sure that the
5361 *     limit eventually drops in case the workload changes. This is
5362 *     needed because, after the limit has gone safely up for a
5363 *     certain workload, it is impossible to guess whether the
5364 *     baseline total service time may have changed, without measuring
5365 *     it again without injection. A more effective version of this
5366 *     step might be to just sample the baseline, by interrupting
5367 *     injection only once, and then to reset/lower the limit only if
5368 *     the total service time with the current limit does happen to be
5369 *     too large.
5370 *
5371 * More details on each step are provided in the comments on the
5372 * pieces of code that implement these steps: the branch handling the
5373 * transition from empty to non empty in bfq_add_request(), the branch
5374 * handling injection in bfq_select_queue(), and the function
5375 * bfq_choose_bfqq_for_injection(). These comments also explain some
5376 * exceptions, made by the injection mechanism in some special cases.
5377 */
5378static void bfq_update_inject_limit(struct bfq_data *bfqd,
5379                                    struct bfq_queue *bfqq)
5380{
5381        u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
5382        unsigned int old_limit = bfqq->inject_limit;
5383
5384        if (bfqq->last_serv_time_ns > 0) {
5385                u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
5386
5387                if (tot_time_ns >= threshold && old_limit > 0) {
5388                        bfqq->inject_limit--;
5389                        bfqq->decrease_time_jif = jiffies;
5390                } else if (tot_time_ns < threshold &&
5391                           old_limit < bfqd->max_rq_in_driver<<1)
5392                        bfqq->inject_limit++;
5393        }
5394
5395        /*
5396         * Either we still have to compute the base value for the
5397         * total service time, and there seem to be the right
5398         * conditions to do it, or we can lower the last base value
5399         * computed.
5400         */
5401        if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 0) ||
5402            tot_time_ns < bfqq->last_serv_time_ns) {
5403                bfqq->last_serv_time_ns = tot_time_ns;
5404                /*
5405                 * Now we certainly have a base value: make sure we
5406                 * start trying injection.
5407                 */
5408                bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
5409        }
5410
5411        /* update complete, not waiting for any request completion any longer */
5412        bfqd->waited_rq = NULL;
5413}
5414
5415/*
5416 * Handle either a requeue or a finish for rq. The things to do are
5417 * the same in both cases: all references to rq are to be dropped. In
5418 * particular, rq is considered completed from the point of view of
5419 * the scheduler.
5420 */
5421static void bfq_finish_requeue_request(struct request *rq)
5422{
5423        struct bfq_queue *bfqq = RQ_BFQQ(rq);
5424        struct bfq_data *bfqd;
5425
5426        /*
5427         * Requeue and finish hooks are invoked in blk-mq without
5428         * checking whether the involved request is actually still
5429         * referenced in the scheduler. To handle this fact, the
5430         * following two checks make this function exit in case of
5431         * spurious invocations, for which there is nothing to do.
5432         *
5433         * First, check whether rq has nothing to do with an elevator.
5434         */
5435        if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
5436                return;
5437
5438        /*
5439         * rq either is not associated with any icq, or is an already
5440         * requeued request that has not (yet) been re-inserted into
5441         * a bfq_queue.
5442         */
5443        if (!rq->elv.icq || !bfqq)
5444                return;
5445
5446        bfqd = bfqq->bfqd;
5447
5448        if (rq->rq_flags & RQF_STARTED)
5449                bfqg_stats_update_completion(bfqq_group(bfqq),
5450                                             rq->start_time_ns,
5451                                             rq->io_start_time_ns,
5452                                             rq->cmd_flags);
5453
5454        if (likely(rq->rq_flags & RQF_STARTED)) {
5455                unsigned long flags;
5456
5457                spin_lock_irqsave(&bfqd->lock, flags);
5458
5459                if (rq == bfqd->waited_rq)
5460                        bfq_update_inject_limit(bfqd, bfqq);
5461
5462                bfq_completed_request(bfqq, bfqd);
5463                bfq_finish_requeue_request_body(bfqq);
5464
5465                spin_unlock_irqrestore(&bfqd->lock, flags);
5466        } else {
5467                /*
5468                 * Request rq may be still/already in the scheduler,
5469                 * in which case we need to remove it (this should
5470                 * never happen in case of requeue). And we cannot
5471                 * defer such a check and removal, to avoid
5472                 * inconsistencies in the time interval from the end
5473                 * of this function to the start of the deferred work.
5474                 * This situation seems to occur only in process
5475                 * context, as a consequence of a merge. In the
5476                 * current version of the code, this implies that the
5477                 * lock is held.
5478                 */
5479
5480                if (!RB_EMPTY_NODE(&rq->rb_node)) {
5481                        bfq_remove_request(rq->q, rq);
5482                        bfqg_stats_update_io_remove(bfqq_group(bfqq),
5483                                                    rq->cmd_flags);
5484                }
5485                bfq_finish_requeue_request_body(bfqq);
5486        }
5487
5488        /*
5489         * Reset private fields. In case of a requeue, this allows
5490         * this function to correctly do nothing if it is spuriously
5491         * invoked again on this same request (see the check at the
5492         * beginning of the function). Probably, a better general
5493         * design would be to prevent blk-mq from invoking the requeue
5494         * or finish hooks of an elevator, for a request that is not
5495         * referred by that elevator.
5496         *
5497         * Resetting the following fields would break the
5498         * request-insertion logic if rq is re-inserted into a bfq
5499         * internal queue, without a re-preparation. Here we assume
5500         * that re-insertions of requeued requests, without
5501         * re-preparation, can happen only for pass_through or at_head
5502         * requests (which are not re-inserted into bfq internal
5503         * queues).
5504         */
5505        rq->elv.priv[0] = NULL;
5506        rq->elv.priv[1] = NULL;
5507}
5508
5509/*
5510 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
5511 * was the last process referring to that bfqq.
5512 */
5513static struct bfq_queue *
5514bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
5515{
5516        bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
5517
5518        if (bfqq_process_refs(bfqq) == 1) {
5519                bfqq->pid = current->pid;
5520                bfq_clear_bfqq_coop(bfqq);
5521                bfq_clear_bfqq_split_coop(bfqq);
5522                return bfqq;
5523        }
5524
5525        bic_set_bfqq(bic, NULL, 1);
5526
5527        bfq_put_cooperator(bfqq);
5528
5529        bfq_put_queue(bfqq);
5530        return NULL;
5531}
5532
5533static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
5534                                                   struct bfq_io_cq *bic,
5535                                                   struct bio *bio,
5536                                                   bool split, bool is_sync,
5537                                                   bool *new_queue)
5538{
5539        struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
5540
5541        if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
5542                return bfqq;
5543
5544        if (new_queue)
5545                *new_queue = true;
5546
5547        if (bfqq)
5548                bfq_put_queue(bfqq);
5549        bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
5550
5551        bic_set_bfqq(bic, bfqq, is_sync);
5552        if (split && is_sync) {
5553                if ((bic->was_in_burst_list && bfqd->large_burst) ||
5554                    bic->saved_in_large_burst)
5555                        bfq_mark_bfqq_in_large_burst(bfqq);
5556                else {
5557                        bfq_clear_bfqq_in_large_burst(bfqq);
5558                        if (bic->was_in_burst_list)
5559                                /*
5560                                 * If bfqq was in the current
5561                                 * burst list before being
5562                                 * merged, then we have to add
5563                                 * it back. And we do not need
5564                                 * to increase burst_size, as
5565                                 * we did not decrement
5566                                 * burst_size when we removed
5567                                 * bfqq from the burst list as
5568                                 * a consequence of a merge
5569                                 * (see comments in
5570                                 * bfq_put_queue). In this
5571                                 * respect, it would be rather
5572                                 * costly to know whether the
5573                                 * current burst list is still
5574                                 * the same burst list from
5575                                 * which bfqq was removed on
5576                                 * the merge. To avoid this
5577                                 * cost, if bfqq was in a
5578                                 * burst list, then we add
5579                                 * bfqq to the current burst
5580                                 * list without any further
5581                                 * check. This can cause
5582                                 * inappropriate insertions,
5583                                 * but rarely enough to not
5584                                 * harm the detection of large
5585                                 * bursts significantly.
5586                                 */
5587                                hlist_add_head(&bfqq->burst_list_node,
5588                                               &bfqd->burst_list);
5589                }
5590                bfqq->split_time = jiffies;
5591        }
5592
5593        return bfqq;
5594}
5595
5596/*
5597 * Only reset private fields. The actual request preparation will be
5598 * performed by bfq_init_rq, when rq is either inserted or merged. See
5599 * comments on bfq_init_rq for the reason behind this delayed
5600 * preparation.
5601 */
5602static void bfq_prepare_request(struct request *rq, struct bio *bio)
5603{
5604        /*
5605         * Regardless of whether we have an icq attached, we have to
5606         * clear the scheduler pointers, as they might point to
5607         * previously allocated bic/bfqq structs.
5608         */
5609        rq->elv.priv[0] = rq->elv.priv[1] = NULL;
5610}
5611
5612/*
5613 * If needed, init rq, allocate bfq data structures associated with
5614 * rq, and increment reference counters in the destination bfq_queue
5615 * for rq. Return the destination bfq_queue for rq, or NULL is rq is
5616 * not associated with any bfq_queue.
5617 *
5618 * This function is invoked by the functions that perform rq insertion
5619 * or merging. One may have expected the above preparation operations
5620 * to be performed in bfq_prepare_request, and not delayed to when rq
5621 * is inserted or merged. The rationale behind this delayed
5622 * preparation is that, after the prepare_request hook is invoked for
5623 * rq, rq may still be transformed into a request with no icq, i.e., a
5624 * request not associated with any queue. No bfq hook is invoked to
5625 * signal this transformation. As a consequence, should these
5626 * preparation operations be performed when the prepare_request hook
5627 * is invoked, and should rq be transformed one moment later, bfq
5628 * would end up in an inconsistent state, because it would have
5629 * incremented some queue counters for an rq destined to
5630 * transformation, without any chance to correctly lower these
5631 * counters back. In contrast, no transformation can still happen for
5632 * rq after rq has been inserted or merged. So, it is safe to execute
5633 * these preparation operations when rq is finally inserted or merged.
5634 */
5635static struct bfq_queue *bfq_init_rq(struct request *rq)
5636{
5637        struct request_queue *q = rq->q;
5638        struct bio *bio = rq->bio;
5639        struct bfq_data *bfqd = q->elevator->elevator_data;
5640        struct bfq_io_cq *bic;
5641        const int is_sync = rq_is_sync(rq);
5642        struct bfq_queue *bfqq;
5643        bool new_queue = false;
5644        bool bfqq_already_existing = false, split = false;
5645
5646        if (unlikely(!rq->elv.icq))
5647                return NULL;
5648
5649        /*
5650         * Assuming that elv.priv[1] is set only if everything is set
5651         * for this rq. This holds true, because this function is
5652         * invoked only for insertion or merging, and, after such
5653         * events, a request cannot be manipulated any longer before
5654         * being removed from bfq.
5655         */
5656        if (rq->elv.priv[1])
5657                return rq->elv.priv[1];
5658
5659        bic = icq_to_bic(rq->elv.icq);
5660
5661        bfq_check_ioprio_change(bic, bio);
5662
5663        bfq_bic_update_cgroup(bic, bio);
5664
5665        bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
5666                                         &new_queue);
5667
5668        if (likely(!new_queue)) {
5669                /* If the queue was seeky for too long, break it apart. */
5670                if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
5671                        bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
5672
5673                        /* Update bic before losing reference to bfqq */
5674                        if (bfq_bfqq_in_large_burst(bfqq))
5675                                bic->saved_in_large_burst = true;
5676
5677                        bfqq = bfq_split_bfqq(bic, bfqq);
5678                        split = true;
5679
5680                        if (!bfqq)
5681                                bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
5682                                                                 true, is_sync,
5683                                                                 NULL);
5684                        else
5685                                bfqq_already_existing = true;
5686                }
5687        }
5688
5689        bfqq->allocated++;
5690        bfqq->ref++;
5691        bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
5692                     rq, bfqq, bfqq->ref);
5693
5694        rq->elv.priv[0] = bic;
5695        rq->elv.priv[1] = bfqq;
5696
5697        /*
5698         * If a bfq_queue has only one process reference, it is owned
5699         * by only this bic: we can then set bfqq->bic = bic. in
5700         * addition, if the queue has also just been split, we have to
5701         * resume its state.
5702         */
5703        if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
5704                bfqq->bic = bic;
5705                if (split) {
5706                        /*
5707                         * The queue has just been split from a shared
5708                         * queue: restore the idle window and the
5709                         * possible weight raising period.
5710                         */
5711                        bfq_bfqq_resume_state(bfqq, bfqd, bic,
5712                                              bfqq_already_existing);
5713                }
5714        }
5715
5716        /*
5717         * Consider bfqq as possibly belonging to a burst of newly
5718         * created queues only if:
5719         * 1) A burst is actually happening (bfqd->burst_size > 0)
5720         * or
5721         * 2) There is no other active queue. In fact, if, in
5722         *    contrast, there are active queues not belonging to the
5723         *    possible burst bfqq may belong to, then there is no gain
5724         *    in considering bfqq as belonging to a burst, and
5725         *    therefore in not weight-raising bfqq. See comments on
5726         *    bfq_handle_burst().
5727         *
5728         * This filtering also helps eliminating false positives,
5729         * occurring when bfqq does not belong to an actual large
5730         * burst, but some background task (e.g., a service) happens
5731         * to trigger the creation of new queues very close to when
5732         * bfqq and its possible companion queues are created. See
5733         * comments on bfq_handle_burst() for further details also on
5734         * this issue.
5735         */
5736        if (unlikely(bfq_bfqq_just_created(bfqq) &&
5737                     (bfqd->burst_size > 0 ||
5738                      bfq_tot_busy_queues(bfqd) == 0)))
5739                bfq_handle_burst(bfqd, bfqq);
5740
5741        return bfqq;
5742}
5743
5744static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
5745{
5746        struct bfq_data *bfqd = bfqq->bfqd;
5747        enum bfqq_expiration reason;
5748        unsigned long flags;
5749
5750        spin_lock_irqsave(&bfqd->lock, flags);
5751        bfq_clear_bfqq_wait_request(bfqq);
5752
5753        if (bfqq != bfqd->in_service_queue) {
5754                spin_unlock_irqrestore(&bfqd->lock, flags);
5755                return;
5756        }
5757
5758        if (bfq_bfqq_budget_timeout(bfqq))
5759                /*
5760                 * Also here the queue can be safely expired
5761                 * for budget timeout without wasting
5762                 * guarantees
5763                 */
5764                reason = BFQQE_BUDGET_TIMEOUT;
5765        else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
5766                /*
5767                 * The queue may not be empty upon timer expiration,
5768                 * because we may not disable the timer when the
5769                 * first request of the in-service queue arrives
5770                 * during disk idling.
5771                 */
5772                reason = BFQQE_TOO_IDLE;
5773        else
5774                goto schedule_dispatch;
5775
5776        bfq_bfqq_expire(bfqd, bfqq, true, reason);
5777
5778schedule_dispatch:
5779        spin_unlock_irqrestore(&bfqd->lock, flags);
5780        bfq_schedule_dispatch(bfqd);
5781}
5782
5783/*
5784 * Handler of the expiration of the timer running if the in-service queue
5785 * is idling inside its time slice.
5786 */
5787static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
5788{
5789        struct bfq_data *bfqd = container_of(timer, struct bfq_data,
5790                                             idle_slice_timer);
5791        struct bfq_queue *bfqq = bfqd->in_service_queue;
5792
5793        /*
5794         * Theoretical race here: the in-service queue can be NULL or
5795         * different from the queue that was idling if a new request
5796         * arrives for the current queue and there is a full dispatch
5797         * cycle that changes the in-service queue.  This can hardly
5798         * happen, but in the worst case we just expire a queue too
5799         * early.
5800         */
5801        if (bfqq)
5802                bfq_idle_slice_timer_body(bfqq);
5803
5804        return HRTIMER_NORESTART;
5805}
5806
5807static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
5808                                 struct bfq_queue **bfqq_ptr)
5809{
5810        struct bfq_queue *bfqq = *bfqq_ptr;
5811
5812        bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
5813        if (bfqq) {
5814                bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
5815
5816                bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
5817                             bfqq, bfqq->ref);
5818                bfq_put_queue(bfqq);
5819                *bfqq_ptr = NULL;
5820        }
5821}
5822
5823/*
5824 * Release all the bfqg references to its async queues.  If we are
5825 * deallocating the group these queues may still contain requests, so
5826 * we reparent them to the root cgroup (i.e., the only one that will
5827 * exist for sure until all the requests on a device are gone).
5828 */
5829void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
5830{
5831        int i, j;
5832
5833        for (i = 0; i < 2; i++)
5834                for (j = 0; j < IOPRIO_BE_NR; j++)
5835                        __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
5836
5837        __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
5838}
5839
5840/*
5841 * See the comments on bfq_limit_depth for the purpose of
5842 * the depths set in the function. Return minimum shallow depth we'll use.
5843 */
5844static unsigned int bfq_update_depths(struct bfq_data *bfqd,
5845                                      struct sbitmap_queue *bt)
5846{
5847        unsigned int i, j, min_shallow = UINT_MAX;
5848
5849        /*
5850         * In-word depths if no bfq_queue is being weight-raised:
5851         * leaving 25% of tags only for sync reads.
5852         *
5853         * In next formulas, right-shift the value
5854         * (1U<<bt->sb.shift), instead of computing directly
5855         * (1U<<(bt->sb.shift - something)), to be robust against
5856         * any possible value of bt->sb.shift, without having to
5857         * limit 'something'.
5858         */
5859        /* no more than 50% of tags for async I/O */
5860        bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
5861        /*
5862         * no more than 75% of tags for sync writes (25% extra tags
5863         * w.r.t. async I/O, to prevent async I/O from starving sync
5864         * writes)
5865         */
5866        bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
5867
5868        /*
5869         * In-word depths in case some bfq_queue is being weight-
5870         * raised: leaving ~63% of tags for sync reads. This is the
5871         * highest percentage for which, in our tests, application
5872         * start-up times didn't suffer from any regression due to tag
5873         * shortage.
5874         */
5875        /* no more than ~18% of tags for async I/O */
5876        bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
5877        /* no more than ~37% of tags for sync writes (~20% extra tags) */
5878        bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
5879
5880        for (i = 0; i < 2; i++)
5881                for (j = 0; j < 2; j++)
5882                        min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
5883
5884        return min_shallow;
5885}
5886
5887static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
5888{
5889        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
5890        struct blk_mq_tags *tags = hctx->sched_tags;
5891        unsigned int min_shallow;
5892
5893        min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
5894        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
5895}
5896
5897static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
5898{
5899        bfq_depth_updated(hctx);
5900        return 0;
5901}
5902
5903static void bfq_exit_queue(struct elevator_queue *e)
5904{
5905        struct bfq_data *bfqd = e->elevator_data;
5906        struct bfq_queue *bfqq, *n;
5907
5908        hrtimer_cancel(&bfqd->idle_slice_timer);
5909
5910        spin_lock_irq(&bfqd->lock);
5911        list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
5912                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
5913        spin_unlock_irq(&bfqd->lock);
5914
5915        hrtimer_cancel(&bfqd->idle_slice_timer);
5916
5917#ifdef CONFIG_BFQ_GROUP_IOSCHED
5918        /* release oom-queue reference to root group */
5919        bfqg_and_blkg_put(bfqd->root_group);
5920
5921        blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
5922#else
5923        spin_lock_irq(&bfqd->lock);
5924        bfq_put_async_queues(bfqd, bfqd->root_group);
5925        kfree(bfqd->root_group);
5926        spin_unlock_irq(&bfqd->lock);
5927#endif
5928
5929        kfree(bfqd);
5930}
5931
5932static void bfq_init_root_group(struct bfq_group *root_group,
5933                                struct bfq_data *bfqd)
5934{
5935        int i;
5936
5937#ifdef CONFIG_BFQ_GROUP_IOSCHED
5938        root_group->entity.parent = NULL;
5939        root_group->my_entity = NULL;
5940        root_group->bfqd = bfqd;
5941#endif
5942        root_group->rq_pos_tree = RB_ROOT;
5943        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
5944                root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
5945        root_group->sched_data.bfq_class_idle_last_service = jiffies;
5946}
5947
5948static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
5949{
5950        struct bfq_data *bfqd;
5951        struct elevator_queue *eq;
5952
5953        eq = elevator_alloc(q, e);
5954        if (!eq)
5955                return -ENOMEM;
5956
5957        bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
5958        if (!bfqd) {
5959                kobject_put(&eq->kobj);
5960                return -ENOMEM;
5961        }
5962        eq->elevator_data = bfqd;
5963
5964        spin_lock_irq(&q->queue_lock);
5965        q->elevator = eq;
5966        spin_unlock_irq(&q->queue_lock);
5967
5968        /*
5969         * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
5970         * Grab a permanent reference to it, so that the normal code flow
5971         * will not attempt to free it.
5972         */
5973        bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
5974        bfqd->oom_bfqq.ref++;
5975        bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
5976        bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
5977        bfqd->oom_bfqq.entity.new_weight =
5978                bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
5979
5980        /* oom_bfqq does not participate to bursts */
5981        bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
5982
5983        /*
5984         * Trigger weight initialization, according to ioprio, at the
5985         * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
5986         * class won't be changed any more.
5987         */
5988        bfqd->oom_bfqq.entity.prio_changed = 1;
5989
5990        bfqd->queue = q;
5991
5992        INIT_LIST_HEAD(&bfqd->dispatch);
5993
5994        hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
5995                     HRTIMER_MODE_REL);
5996        bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
5997
5998        bfqd->queue_weights_tree = RB_ROOT_CACHED;
5999        bfqd->num_groups_with_pending_reqs = 0;
6000
6001        INIT_LIST_HEAD(&bfqd->active_list);
6002        INIT_LIST_HEAD(&bfqd->idle_list);
6003        INIT_HLIST_HEAD(&bfqd->burst_list);
6004
6005        bfqd->hw_tag = -1;
6006        bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
6007
6008        bfqd->bfq_max_budget = bfq_default_max_budget;
6009
6010        bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
6011        bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
6012        bfqd->bfq_back_max = bfq_back_max;
6013        bfqd->bfq_back_penalty = bfq_back_penalty;
6014        bfqd->bfq_slice_idle = bfq_slice_idle;
6015        bfqd->bfq_timeout = bfq_timeout;
6016
6017        bfqd->bfq_requests_within_timer = 120;
6018
6019        bfqd->bfq_large_burst_thresh = 8;
6020        bfqd->bfq_burst_interval = msecs_to_jiffies(180);
6021
6022        bfqd->low_latency = true;
6023
6024        /*
6025         * Trade-off between responsiveness and fairness.
6026         */
6027        bfqd->bfq_wr_coeff = 30;
6028        bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
6029        bfqd->bfq_wr_max_time = 0;
6030        bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
6031        bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
6032        bfqd->bfq_wr_max_softrt_rate = 7000; /*
6033                                              * Approximate rate required
6034                                              * to playback or record a
6035                                              * high-definition compressed
6036                                              * video.
6037                                              */
6038        bfqd->wr_busy_queues = 0;
6039
6040        /*
6041         * Begin by assuming, optimistically, that the device peak
6042         * rate is equal to 2/3 of the highest reference rate.
6043         */
6044        bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
6045                ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
6046        bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
6047
6048        spin_lock_init(&bfqd->lock);
6049
6050        /*
6051         * The invocation of the next bfq_create_group_hierarchy
6052         * function is the head of a chain of function calls
6053         * (bfq_create_group_hierarchy->blkcg_activate_policy->
6054         * blk_mq_freeze_queue) that may lead to the invocation of the
6055         * has_work hook function. For this reason,
6056         * bfq_create_group_hierarchy is invoked only after all
6057         * scheduler data has been initialized, apart from the fields
6058         * that can be initialized only after invoking
6059         * bfq_create_group_hierarchy. This, in particular, enables
6060         * has_work to correctly return false. Of course, to avoid
6061         * other inconsistencies, the blk-mq stack must then refrain
6062         * from invoking further scheduler hooks before this init
6063         * function is finished.
6064         */
6065        bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
6066        if (!bfqd->root_group)
6067                goto out_free;
6068        bfq_init_root_group(bfqd->root_group, bfqd);
6069        bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
6070
6071        wbt_disable_default(q);
6072        return 0;
6073
6074out_free:
6075        kfree(bfqd);
6076        kobject_put(&eq->kobj);
6077        return -ENOMEM;
6078}
6079
6080static void bfq_slab_kill(void)
6081{
6082        kmem_cache_destroy(bfq_pool);
6083}
6084
6085static int __init bfq_slab_setup(void)
6086{
6087        bfq_pool = KMEM_CACHE(bfq_queue, 0);
6088        if (!bfq_pool)
6089                return -ENOMEM;
6090        return 0;
6091}
6092
6093static ssize_t bfq_var_show(unsigned int var, char *page)
6094{
6095        return sprintf(page, "%u\n", var);
6096}
6097
6098static int bfq_var_store(unsigned long *var, const char *page)
6099{
6100        unsigned long new_val;
6101        int ret = kstrtoul(page, 10, &new_val);
6102
6103        if (ret)
6104                return ret;
6105        *var = new_val;
6106        return 0;
6107}
6108
6109#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
6110static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
6111{                                                                       \
6112        struct bfq_data *bfqd = e->elevator_data;                       \
6113        u64 __data = __VAR;                                             \
6114        if (__CONV == 1)                                                \
6115                __data = jiffies_to_msecs(__data);                      \
6116        else if (__CONV == 2)                                           \
6117                __data = div_u64(__data, NSEC_PER_MSEC);                \
6118        return bfq_var_show(__data, (page));                            \
6119}
6120SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
6121SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
6122SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
6123SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
6124SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
6125SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
6126SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
6127SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
6128SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
6129#undef SHOW_FUNCTION
6130
6131#define USEC_SHOW_FUNCTION(__FUNC, __VAR)                               \
6132static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
6133{                                                                       \
6134        struct bfq_data *bfqd = e->elevator_data;                       \
6135        u64 __data = __VAR;                                             \
6136        __data = div_u64(__data, NSEC_PER_USEC);                        \
6137        return bfq_var_show(__data, (page));                            \
6138}
6139USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
6140#undef USEC_SHOW_FUNCTION
6141
6142#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
6143static ssize_t                                                          \
6144__FUNC(struct elevator_queue *e, const char *page, size_t count)        \
6145{                                                                       \
6146        struct bfq_data *bfqd = e->elevator_data;                       \
6147        unsigned long __data, __min = (MIN), __max = (MAX);             \
6148        int ret;                                                        \
6149                                                                        \
6150        ret = bfq_var_store(&__data, (page));                           \
6151        if (ret)                                                        \
6152                return ret;                                             \
6153        if (__data < __min)                                             \
6154                __data = __min;                                         \
6155        else if (__data > __max)                                        \
6156                __data = __max;                                         \
6157        if (__CONV == 1)                                                \
6158                *(__PTR) = msecs_to_jiffies(__data);                    \
6159        else if (__CONV == 2)                                           \
6160                *(__PTR) = (u64)__data * NSEC_PER_MSEC;                 \
6161        else                                                            \
6162                *(__PTR) = __data;                                      \
6163        return count;                                                   \
6164}
6165STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
6166                INT_MAX, 2);
6167STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
6168                INT_MAX, 2);
6169STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
6170STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
6171                INT_MAX, 0);
6172STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
6173#undef STORE_FUNCTION
6174
6175#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                    \
6176static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
6177{                                                                       \
6178        struct bfq_data *bfqd = e->elevator_data;                       \
6179        unsigned long __data, __min = (MIN), __max = (MAX);             \
6180        int ret;                                                        \
6181                                                                        \
6182        ret = bfq_var_store(&__data, (page));                           \
6183        if (ret)                                                        \
6184                return ret;                                             \
6185        if (__data < __min)                                             \
6186                __data = __min;                                         \
6187        else if (__data > __max)                                        \
6188                __data = __max;                                         \
6189        *(__PTR) = (u64)__data * NSEC_PER_USEC;                         \
6190        return count;                                                   \
6191}
6192USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
6193                    UINT_MAX);
6194#undef USEC_STORE_FUNCTION
6195
6196static ssize_t bfq_max_budget_store(struct elevator_queue *e,
6197                                    const char *page, size_t count)
6198{
6199        struct bfq_data *bfqd = e->elevator_data;
6200        unsigned long __data;
6201        int ret;
6202
6203        ret = bfq_var_store(&__data, (page));
6204        if (ret)
6205                return ret;
6206
6207        if (__data == 0)
6208                bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
6209        else {
6210                if (__data > INT_MAX)
6211                        __data = INT_MAX;
6212                bfqd->bfq_max_budget = __data;
6213        }
6214
6215        bfqd->bfq_user_max_budget = __data;
6216
6217        return count;
6218}
6219
6220/*
6221 * Leaving this name to preserve name compatibility with cfq
6222 * parameters, but this timeout is used for both sync and async.
6223 */
6224static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
6225                                      const char *page, size_t count)
6226{
6227        struct bfq_data *bfqd = e->elevator_data;
6228        unsigned long __data;
6229        int ret;
6230
6231        ret = bfq_var_store(&__data, (page));
6232        if (ret)
6233                return ret;
6234
6235        if (__data < 1)
6236                __data = 1;
6237        else if (__data > INT_MAX)
6238                __data = INT_MAX;
6239
6240        bfqd->bfq_timeout = msecs_to_jiffies(__data);
6241        if (bfqd->bfq_user_max_budget == 0)
6242                bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
6243
6244        return count;
6245}
6246
6247static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
6248                                     const char *page, size_t count)
6249{
6250        struct bfq_data *bfqd = e->elevator_data;
6251        unsigned long __data;
6252        int ret;
6253
6254        ret = bfq_var_store(&__data, (page));
6255        if (ret)
6256                return ret;
6257
6258        if (__data > 1)
6259                __data = 1;
6260        if (!bfqd->strict_guarantees && __data == 1
6261            && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
6262                bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
6263
6264        bfqd->strict_guarantees = __data;
6265
6266        return count;
6267}
6268
6269static ssize_t bfq_low_latency_store(struct elevator_queue *e,
6270                                     const char *page, size_t count)
6271{
6272        struct bfq_data *bfqd = e->elevator_data;
6273        unsigned long __data;
6274        int ret;
6275
6276        ret = bfq_var_store(&__data, (page));
6277        if (ret)
6278                return ret;
6279
6280        if (__data > 1)
6281                __data = 1;
6282        if (__data == 0 && bfqd->low_latency != 0)
6283                bfq_end_wr(bfqd);
6284        bfqd->low_latency = __data;
6285
6286        return count;
6287}
6288
6289#define BFQ_ATTR(name) \
6290        __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
6291
6292static struct elv_fs_entry bfq_attrs[] = {
6293        BFQ_ATTR(fifo_expire_sync),
6294        BFQ_ATTR(fifo_expire_async),
6295        BFQ_ATTR(back_seek_max),
6296        BFQ_ATTR(back_seek_penalty),
6297        BFQ_ATTR(slice_idle),
6298        BFQ_ATTR(slice_idle_us),
6299        BFQ_ATTR(max_budget),
6300        BFQ_ATTR(timeout_sync),
6301        BFQ_ATTR(strict_guarantees),
6302        BFQ_ATTR(low_latency),
6303        __ATTR_NULL
6304};
6305
6306static struct elevator_type iosched_bfq_mq = {
6307        .ops = {
6308                .limit_depth            = bfq_limit_depth,
6309                .prepare_request        = bfq_prepare_request,
6310                .requeue_request        = bfq_finish_requeue_request,
6311                .finish_request         = bfq_finish_requeue_request,
6312                .exit_icq               = bfq_exit_icq,
6313                .insert_requests        = bfq_insert_requests,
6314                .dispatch_request       = bfq_dispatch_request,
6315                .next_request           = elv_rb_latter_request,
6316                .former_request         = elv_rb_former_request,
6317                .allow_merge            = bfq_allow_bio_merge,
6318                .bio_merge              = bfq_bio_merge,
6319                .request_merge          = bfq_request_merge,
6320                .requests_merged        = bfq_requests_merged,
6321                .request_merged         = bfq_request_merged,
6322                .has_work               = bfq_has_work,
6323                .depth_updated          = bfq_depth_updated,
6324                .init_hctx              = bfq_init_hctx,
6325                .init_sched             = bfq_init_queue,
6326                .exit_sched             = bfq_exit_queue,
6327        },
6328
6329        .icq_size =             sizeof(struct bfq_io_cq),
6330        .icq_align =            __alignof__(struct bfq_io_cq),
6331        .elevator_attrs =       bfq_attrs,
6332        .elevator_name =        "bfq",
6333        .elevator_owner =       THIS_MODULE,
6334};
6335MODULE_ALIAS("bfq-iosched");
6336
6337static int __init bfq_init(void)
6338{
6339        int ret;
6340
6341#ifdef CONFIG_BFQ_GROUP_IOSCHED
6342        ret = blkcg_policy_register(&blkcg_policy_bfq);
6343        if (ret)
6344                return ret;
6345#endif
6346
6347        ret = -ENOMEM;
6348        if (bfq_slab_setup())
6349                goto err_pol_unreg;
6350
6351        /*
6352         * Times to load large popular applications for the typical
6353         * systems installed on the reference devices (see the
6354         * comments before the definition of the next
6355         * array). Actually, we use slightly lower values, as the
6356         * estimated peak rate tends to be smaller than the actual
6357         * peak rate.  The reason for this last fact is that estimates
6358         * are computed over much shorter time intervals than the long
6359         * intervals typically used for benchmarking. Why? First, to
6360         * adapt more quickly to variations. Second, because an I/O
6361         * scheduler cannot rely on a peak-rate-evaluation workload to
6362         * be run for a long time.
6363         */
6364        ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
6365        ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
6366
6367        ret = elv_register(&iosched_bfq_mq);
6368        if (ret)
6369                goto slab_kill;
6370
6371        return 0;
6372
6373slab_kill:
6374        bfq_slab_kill();
6375err_pol_unreg:
6376#ifdef CONFIG_BFQ_GROUP_IOSCHED
6377        blkcg_policy_unregister(&blkcg_policy_bfq);
6378#endif
6379        return ret;
6380}
6381
6382static void __exit bfq_exit(void)
6383{
6384        elv_unregister(&iosched_bfq_mq);
6385#ifdef CONFIG_BFQ_GROUP_IOSCHED
6386        blkcg_policy_unregister(&blkcg_policy_bfq);
6387#endif
6388        bfq_slab_kill();
6389}
6390
6391module_init(bfq_init);
6392module_exit(bfq_exit);
6393
6394MODULE_AUTHOR("Paolo Valente");
6395MODULE_LICENSE("GPL");
6396MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
6397