linux/block/blk-rq-qos.c
// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
        unsigned int cur = atomic_read(v);

        for (;;) {
                unsigned int old;

                if (cur >= below)
                        return false;
                old = atomic_cmpxchg(v, cur, cur + 1);
                if (old == cur)
                        break;
                cur = old;
        }

        return true;
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
        return atomic_inc_below(&rq_wait->inflight, limit);
}
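
/*
 * Illustrative only (the limit and the release site below are hypothetical,
 * not part of this file): a policy admits a request while ->inflight stays
 * below its limit and releases the slot with a plain atomic_dec(), e.g.
 *
 *	if (!rq_wait_inc_below(&rqw, limit))
 *		rq_qos_wait(&rqw, ...);		throttle until there is room
 *	...
 *	atomic_dec(&rqw.inflight);
 *	if (wq_has_sleeper(&rqw.wait))
 *		wake_up_all(&rqw.wait);
 */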

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->cleanup)
                        rqos->ops->cleanup(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->done)
                        rqos->ops->done(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->issue)
                        rqos->ops->issue(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->requeue)
                        rqos->ops->requeue(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->throttle)
                        rqos->ops->throttle(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        do {
                if (rqos->ops->track)
                        rqos->ops->track(rqos, rq, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->done_bio)
                        rqos->ops->done_bio(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}
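
/*
 * Note: the __rq_qos_*() walkers above are the out-of-line slow paths.
 * Callers normally go through the rq_qos_*() wrappers in blk-rq-qos.h,
 * which only drop in here when a policy is attached to the queue, roughly
 * along these lines (sketch, see the header for the real definitions):
 *
 *	static inline void rq_qos_done(struct request_queue *q,
 *				       struct request *rq)
 *	{
 *		if (q->rq_qos)
 *			__rq_qos_done(q->rq_qos, rq);
 *	}
 */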

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
        unsigned int depth;
        bool ret = false;

        /*
         * For QD=1 devices, this is a special case. It's important for those
         * to have one request ready when one completes, so force a depth of
         * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
         * since the device can't have more than that in flight. If we're
         * scaling down, then keep a setting of 1/1/1.
         */
        if (rqd->queue_depth == 1) {
                if (rqd->scale_step > 0)
                        rqd->max_depth = 1;
                else {
                        rqd->max_depth = 2;
                        ret = true;
                }
        } else {
                /*
                 * scale_step == 0 is our default state. If we have suffered
                 * latency spikes, step will be > 0, and we shrink the
                 * allowed write depths. If step is < 0, we're only doing
                 * writes, and we allow a temporarily higher depth to
                 * increase performance.
                 */
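                /*
                 * Worked example (hypothetical numbers, not from this file):
                 * with default_depth = 64 and queue_depth = 128 the base
                 * depth below is 64.  scale_step == 2 then yields
                 * 1 + (63 >> 2) = 16, while scale_step == -1 yields
                 * 1 + (63 << 1) = 127, which is clamped to
                 * maxd = 3 * 128 / 4 = 96.
                 */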
                depth = min_t(unsigned int, rqd->default_depth,
                              rqd->queue_depth);
                if (rqd->scale_step > 0)
                        depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
                else if (rqd->scale_step < 0) {
                        unsigned int maxd = 3 * rqd->queue_depth / 4;

                        depth = 1 + ((depth - 1) << -rqd->scale_step);
                        if (depth > maxd) {
                                depth = maxd;
                                ret = true;
                        }
                }

                rqd->max_depth = depth;
        }

        return ret;
}

void rq_depth_scale_up(struct rq_depth *rqd)
{
        /*
         * Hit max in previous round, stop here
         */
        if (rqd->scaled_max)
                return;

        rqd->scale_step--;

        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rqd down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
        /*
         * Stop scaling down when we've hit the limit. This also prevents
         * ->scale_step from going to crazy values, if the device can't
         * keep up.
         */
        if (rqd->max_depth == 1)
                return;

        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
        else
                rqd->scale_step++;

        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
}
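
/*
 * Illustrative only (the latency check is hypothetical, not part of this
 * file): a policy such as wbt periodically compares observed completion
 * latencies against its target and nudges the depth accordingly, e.g.
 *
 *	if (latency_above_target)
 *		rq_depth_scale_down(rqd, hard_throttle);
 *	else
 *		rq_depth_scale_up(rqd);
 */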

struct rq_qos_wait_data {
        struct wait_queue_entry wq;
        struct task_struct *task;
        struct rq_wait *rqw;
        acquire_inflight_cb_t *cb;
        void *private_data;
        bool got_token;
};

static int rq_qos_wake_function(struct wait_queue_entry *curr,
                                unsigned int mode, int wake_flags, void *key)
{
        struct rq_qos_wait_data *data = container_of(curr,
                                                     struct rq_qos_wait_data,
                                                     wq);

        /*
         * If we fail to get a budget, return -1 to interrupt the wake up loop
         * in __wake_up_common.
         */
        if (!data->cb(data->rqw, data->private_data))
                return -1;

        data->got_token = true;
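        /*
         * Make sure ->got_token is visible before the waiter is taken off
         * the list and woken; pairs with the smp_rmb() in rq_qos_wait().
         */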
        smp_wmb();
        list_del_init(&curr->entry);
        wake_up_process(data->task);
        return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available.  The acquire_inflight_cb should
 * inc the rqw->inflight if we have the ability to do so, or return false if
 * not, and then we will sleep until room becomes available.
 *
 * cleanup_cb is for the case where we race with a waker and need to clean up
 * the inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                 acquire_inflight_cb_t *acquire_inflight_cb,
                 cleanup_cb_t *cleanup_cb)
{
        struct rq_qos_wait_data data = {
                .wq = {
                        .func   = rq_qos_wake_function,
                        .entry  = LIST_HEAD_INIT(data.wq.entry),
                },
                .task = current,
                .rqw = rqw,
                .cb = acquire_inflight_cb,
                .private_data = private_data,
        };
        bool has_sleeper;

        has_sleeper = wq_has_sleeper(&rqw->wait);
        if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
                return;

        prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
        has_sleeper = !wq_has_single_sleeper(&rqw->wait);
        do {
                /* The memory barrier in set_current_state saves us here. */
                if (data.got_token)
                        break;
                if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
                        finish_wait(&rqw->wait, &data.wq);

                        /*
                         * We raced with rq_qos_wake_function() getting a
                         * token, which means we now have two. Put our local
                         * token and wake anyone else potentially waiting for
                         * one.
                         */
                        smp_rmb();
                        if (data.got_token)
                                cleanup_cb(rqw, private_data);
                        break;
                }
                io_schedule();
                has_sleeper = true;
                set_current_state(TASK_UNINTERRUPTIBLE);
        } while (1);
        finish_wait(&rqw->wait, &data.wq);
}
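
/*
 * Illustrative only (my_inflight_cb, my_cleanup_cb, my_limit and my_data are
 * hypothetical, not part of this file): a typical caller pairs an acquire
 * callback built on rq_wait_inc_below() with a cleanup callback that puts
 * the extra token taken in the race described above, e.g.
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit(private_data));
 *	}
 *
 *	static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		if (wq_has_sleeper(&rqw->wait))
 *			wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(rqw, my_data, my_inflight_cb, my_cleanup_cb);
 */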

void rq_qos_exit(struct request_queue *q)
{
        blk_mq_debugfs_unregister_queue_rqos(q);

        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;
                q->rq_qos = rqos->next;
                rqos->ops->exit(rqos);
        }
}