/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences of up to three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and the capabilities advertised by the queue.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with the DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next step.
 * This allows arbitrary merging of different types of FLUSH/FUA requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This avoids starvation in
 *     the unlikely case of a continuous stream of FUA (without FLUSH)
 *     requests.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

/*
 * Decompose a FLUSH/FUA request into the REQ_FSEQ_* steps it actually
 * needs, based on what the queue (@fflags) supports and what the request
 * itself carries.
 */
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
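
/*
 * Worked example (illustrative): a data write carrying REQ_FLUSH|REQ_FUA.
 *
 *   Queue advertises FLUSH but not FUA:
 *	policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
 *
 *   Queue advertises both FLUSH and FUA:
 *	policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *	(REQ_FUA stays on the request and is passed down with the data,
 *	 see blk_insert_flush())
 */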

/* the next step in the sequence is the lowest bit not yet set in flush.seq */
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

/*
 * Hand @rq back to the dispatch machinery.  Returns %true if the caller
 * should kick the queue to have the request processed, %false if the
 * blk-mq requeue list already takes care of it.
 */
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted for flush sequencing and may
		 * already have gone through the flush data request
		 * completion path.  Restore @rq for normal completion and
		 * end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}
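
/*
 * Illustrative walk-through: a request whose policy is
 * PREFLUSH | DATA | POSTFLUSH starts with flush.seq == 0, so
 * blk_flush_cur_seq() yields PREFLUSH and the request waits on the pending
 * list until blk_kick_flush() issues a flush.  When that flush completes,
 * flush_end_io() records PREFLUSH, the next step becomes DATA and the
 * request is requeued for normal execution.  Its data completion records
 * DATA, the request then waits for a second flush (POSTFLUSH), and once
 * that completes the sequence reaches DONE and the request is ended.
 * Steps a request doesn't need are pre-marked by blk_insert_flush() via
 * REQ_FSEQ_ACTIONS & ~policy, so ffz() simply skips over them.
 */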

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stalls in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid the stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the request at the head of the pending list:
	 * that request and the flush can't be in flight at the same time.
	 * Record the flush request as the tag's owner.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or from blk-mq dispatch.  @rq is being submitted.  Analyze
 * what needs to be done and put it on the right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may translate
	 * into nothing if the underlying device does not advertise a
	 * write-back cache.  In this case, simply complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * it for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark the request member
	 * @rq->flush and insert it into the flush machinery.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error;
 *    the driver reports it through @error_sector if it supports doing so.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_iter.bi_sector,
	 * if it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
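
/*
 * Example use (illustrative only): code holding a reference to @bdev that
 * must ensure previously completed writes have reached stable storage can
 * issue a flush and, optionally, collect the error sector:
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 */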

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues do not have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}