// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

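/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */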
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

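/*
 * The __rq_qos_*() helpers below all follow the same pattern: walk the
 * queue's chain of attached rq_qos policies and invoke the corresponding
 * callback on each policy that implements it.
 */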
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

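/*
 * Return true, if we can't increase the depth further by scaling.
 */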
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

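	/*
	 * For QD=1 devices there is not much room to scale: unless we have
	 * scaled down, use a depth of 2 so one request can be queued while
	 * another completes, and flag that the depth cannot be increased
	 * any further.
	 */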
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
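		/*
		 * scale_step > 0 means we have scaled the depth down from
		 * the default: each step roughly halves it. scale_step < 0
		 * scales the depth up, doubling it per step, but never past
		 * 3/4 of the queue depth; hitting that cap means we cannot
		 * scale up any further.
		 */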
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}

void rq_depth_scale_up(struct rq_depth *rqd)
{
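	/*
	 * Hit max in previous round, stop here.
	 */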
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

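/*
 * Scale the depth down. If 'hard_throttle' is set, do it quicker: any
 * upward scaling is undone in a single step instead of one step at a time.
 */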
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
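	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */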
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}

struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};

static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

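	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */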
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
	smp_wmb();
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

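/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * The caller tries to grab an inflight slot via @acquire_inflight_cb; if none
 * is available, the task sleeps on @rqw until rq_qos_wake_function() hands it
 * one. If we race with a waker and end up with two slots, @cleanup_cb is used
 * to put the extra one back.
 */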
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func	= rq_qos_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	has_sleeper = !wq_has_single_sleeper(&rqw->wait);
	do {
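		/* The memory barrier in set_current_state saves us here. */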
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);
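			/*
			 * We raced with rq_qos_wake_function() getting a
			 * token, which means we now have two. Put our local
			 * token and wake anyone else potentially waiting for
			 * one.
			 */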
			smp_rmb();
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = true;
		set_current_state(TASK_UNINTERRUPTIBLE);
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}

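/*
 * Tear down the queue's rq_qos policies: remove their debugfs entries,
 * then detach each policy from the queue and call its exit() hook.
 */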
void rq_qos_exit(struct request_queue *q)
{
	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}