// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH
 * indicates that the device cache should be flushed before the data is
 * executed, and REQ_FUA means that the data must be on non-volatile media
 * on request completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't
 * make any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA,
 * REQ_PREFLUSH is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met,
 * a flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only
 * one bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

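/*
 * Compute which REQ_FSEQ_* steps @rq needs.  The QUEUE_FLAG_WC and
 * QUEUE_FLAG_FUA bits tested here are advertised by the driver; as an
 * illustrative sketch (not part of this file), a driver with a write-back
 * cache but no FUA support would declare:
 *
 *	blk_queue_write_cache(q, true, false);
 *
 * with which a REQ_PREFLUSH|REQ_FUA data write decomposes into all three
 * steps: PREFLUSH, DATA and POSTFLUSH.
 */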
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

/* the next incomplete step is the lowest clear bit in @rq->flush.seq */
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}
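
/*
 * Note: routing through the requeue list keeps flush sequencing off the
 * hot submission path; the "true" passed above asks blk-mq to kick the
 * requeue work so the request is dispatched promptly.
 */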

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Manipulate request
		 * directly without going through blk_update_request().
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
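
/*
 * Example walk-through: with a write-back cache and no FUA support, a
 * REQ_PREFLUSH|REQ_FUA data write steps through PREFLUSH -> DATA ->
 * POSTFLUSH -> DONE, with blk_flush_complete_seq() invoked once per
 * completed step to advance the request to the next one.
 */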

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
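
/*
 * Note on flush_rq->ref above: the flush request may also be referenced by
 * the timeout path iterating busy tags; only the last reference holder
 * performs the teardown, while an earlier completion merely records its
 * status in fq->rq_status for the survivor to pick up.
 */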

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress. Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of none scheduler, borrow tag from the first request
	 * since they can't be in flight at the same time. And acquire
	 * the tag's ownership for flush req.
	 *
	 * In case of IO scheduler, flush rq need to borrow scheduler tag
	 * just for cheating put/get driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}
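
/*
 * The flush_rq issued above carries no data; drivers see it as a bare
 * cache-flush command (e.g. a SCSI SYNCHRONIZE CACHE or an NVMe Flush).
 */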

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}
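
/*
 * blk_mq_sched_restart() above re-runs the hardware queue in case other
 * requests were held back waiting for the resources this data request
 * just released.
 */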

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from the blk-mq submission path when a REQ_PREFLUSH and/or
 * REQ_FUA request is being inserted.  @rq is decomposed according to
 * blk_flush_policy() and fed into the flush state machine.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}
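
/*
 * Illustrative caller sketch (assumed, not part of this file): filesystems
 * reach this path by flagging a write bio before submission:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 *	submit_bio(bio);
 *
 * The resulting request is routed to blk_insert_flush() above and
 * decomposed according to blk_flush_policy().
 */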

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
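
/*
 * Illustrative caller sketch (assumed, not part of this file):
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	if (ret)
 *		pr_warn("cache flush failed: %d\n", ret);
 */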

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
					      int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queue hasn't flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}