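/*
 * Functions to sequence REQ_FLUSH/REQ_FUA writes.
 *
 * A FLUSH/FUA request is decomposed into up to three steps - PREFLUSH,
 * DATA and POSTFLUSH - depending on what the queue actually supports
 * (see blk_flush_policy()).  The state machine below walks each request
 * through those steps and merges concurrent flushes where possible.
 */
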
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If a flush has been pending for longer than this timeout, it is
	 * issued even though flush data requests are still in flight
	 * (see blk_kick_flush()).
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

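/*
 * Work out which REQ_FSEQ_* steps @rq needs, based on the request's own
 * flags and the flush capabilities (@fflags) of the queue.
 */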
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

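/* Return the next pending step (lowest unfinished REQ_FSEQ_* bit) for @rq. */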
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

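/*
 * Undo the changes made for flush sequencing so that @rq can be completed
 * as a normal request again.
 */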
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio may no longer point at the
	 * original bio, but @rq->biotail still does.  Restore it so the
	 * request can be completed normally.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

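/*
 * Put @rq on the dispatch path: the requeue list for blk-mq, or the
 * queue_head for the legacy path.  Returns true if the caller should run
 * the queue to dispatch the request.
 */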
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}
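
/**
 * blk_flush_complete_seq - complete one step of a flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue @rq belongs to
 * @seq: step(s) that just completed (mask of REQ_FSEQ_*, may be zero)
 * @error: error from the completed step, if any
 *
 * Record completion of @seq for @rq, move it to the next step of its
 * sequence, and consider kicking the pending flush.  Returns true if
 * requests were added to the dispatch queue and the queue should be run.
 */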
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was adjusted by blk_insert_flush() for flush
		 * sequencing and may already have gone through the flush
		 * data completion path.  Restore the original end_io and
		 * complete it as a normal request.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

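/*
 * Completion handler for the flush request itself: account the flush and
 * advance every request that was waiting on it to its next sequence step.
 */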
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid a stall on the legacy path: requests may
	 * have been moved silently onto an empty queue_head above, and the
	 * queue may have been held back while the flush was in flight
	 * (flush_queue_delayed).  Run it asynchronously so we stay out of
	 * the request_fn from this completion context.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
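
/**
 * blk_kick_flush - consider issuing a flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related state of @q has changed; issue the pending flush request
 * if the conditions allow it.  Returns true if the (legacy) dispatch
 * queue gained a request and should be run.
 */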
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* a flush is already in flight, or there is nothing pending */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* hold off while data requests are in flight, unless we have waited too long */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * For blk-mq, borrow the tag from the first pending request; the
	 * flush and that request cannot be in flight at the same time, so
	 * the tag is free to reuse.
	 */
	if (q->mq_ops) {
		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

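/*
 * Completion handler for the data part of a sequenced FLUSH/FUA request
 * on the legacy (non-mq) path.
 */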
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

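/*
 * blk-mq counterpart of flush_data_end_io(): advance the sequence under
 * the flush lock and run the hardware queue if needed.
 */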
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
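
/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * @rq is being submitted.  Work out which steps it needs, strip the flags
 * the driver does not need to see, and either complete it, queue it for
 * normal execution, or start it through the flush state machine.
 */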
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache it */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may translate
	 * into nothing if the underlying device does not advertise a
	 * write-back cache.  In this case, simply complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * it for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it as part of a
	 * flush sequence and kick off the state machine.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
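
/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Issue a flush for the block device in question.  The caller may supply
 * room for storing the error offset in case of a flush error, if they
 * wish to.
 */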
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file), so issuing a flush
	 * here would panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

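/*
 * Allocate and initialize a flush queue, including the pre-allocated
 * flush request itself.
 */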
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		/* leave room for the driver's per-request payload */
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues do not have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}