/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request policy.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH
 * indicates that the device cache should be flushed before the data is
 * executed, and REQ_FUA means that the data must be on non-volatile media
 * on request completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there are continuous streams of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

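/*
 * Work out which REQ_FSEQ_* steps @rq needs based on the queue's cache
 * features: DATA if the request carries sectors, PREFLUSH if the device has
 * a writeback cache and @rq asked for one, and POSTFLUSH if REQ_FUA was
 * requested but the device can't do FUA natively.
 */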
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

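/* Return the next pending step of @rq's flush sequence (lowest unset bit). */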
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

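/*
 * Undo what blk_insert_flush() did to @rq so it can be completed as a
 * normal request again.
 */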
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

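/*
 * (Re)issue @rq to the dispatch path: via the requeue list for blk-mq, or
 * directly onto q->queue_head for the legacy path.  Returns %true if the
 * caller needs to run the legacy queue.
 */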
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq, cmd_flags);
	return kicked | queued;
}

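/*
 * Completion handler for the flush request itself.  Flips the double
 * buffer and walks every request that was waiting on this flush, advancing
 * each one to the next step of its sequence.
 */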
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		if (!q->elevator) {
			blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
			flush_rq->tag = -1;
		} else {
			blk_mq_put_driver_tag_hctx(hctx, flush_rq);
			flush_rq->internal_tag = -1;
		}
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to empty queue_head may stall the
	 * queue.
	 * 2. When the flush request is running in a non-queueable queue,
	 * the queue is held.  Restart the queue after the flush request is
	 * finished to avoid stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 and C2 */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress.  Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    !(q->mq_ops && q->elevator) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of the none scheduler, borrow the tag from the first
	 * request since they can't be in flight at the same time, and
	 * acquire the tag's ownership for the flush req.
	 *
	 * In case of an IO scheduler, the flush rq needs to borrow the
	 * scheduler tag just for cheating put/get driver tag.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;

		if (!q->elevator) {
			fq->orig_rq = first_rq;
			flush_rq->tag = first_rq->tag;
			hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
		} else {
			flush_rq->internal_tag = first_rq->internal_tag;
		}
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

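/* Completion handler for the DATA step of a sequenced request (legacy path). */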
static void flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	lockdep_assert_held(q->queue_lock);

	/*
	 * Let the legacy elevator account this request's completion before
	 * the flush machinery moves it on to the next step of its sequence.
	 */
	elv_completed_request(q, rq);

	/* for avoiding double accounting */
	rq->rq_flags &= ~RQF_STARTED;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

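/* Completion handler for the DATA step of a sequenced request (blk-mq path). */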
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag_hctx(hctx, rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_run_hw_queue(hctx, true);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from the blk-mq insertion path.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_request(rq, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_request_bypass_insert(rq, false);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

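/*
 * Allocate the flush machinery state for a queue, including the
 * pre-allocated flush_rq used to issue PREFLUSH/POSTFLUSH requests.
 */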
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops)
		spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

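/* Free a flush queue allocated by blk_alloc_flush_queue(). */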
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}