// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request policy computed by blk_flush_policy().  If the device doesn't
 * have a writeback cache, PREFLUSH and POSTFLUSH are dropped; if the
 * device supports FUA, POSTFLUSH is replaced by a FUA write on the
 * request itself.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only
 * one bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

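/* PREFLUSH/FUA sequences */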
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,
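	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */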
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

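/*
 * Compute which steps of the flush sequence @rq needs: DATA if the request
 * carries payload, PREFLUSH/POSTFLUSH depending on the queue's
 * writeback-cache and FUA capabilities.
 */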
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

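/* return the first step of @rq's flush sequence that hasn't completed yet */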
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

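/* charge a completed flush to the whole-disk part0 flush statistics */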
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

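/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (one or more of %REQ_FSEQ_*)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 *     spin_lock_irq(fq->mq_flush_lock)
 */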
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * Flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is called from the timeout code path too,
	 * for avoiding use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

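/* a flush request is identified by its completion handler */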
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

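/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 *     spin_lock_irq(fq->mq_flush_lock)
 */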
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 described at the top of this file */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of none scheduler, borrow tag from the first request
	 * since they can't be in flight at the same time. And acquire
	 * the tag's ownership for flush req.
	 *
	 * In case of IO scheduler, flush rq need to borrow scheduler tag
	 * just for cheating put/get driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow data request's driver tag, so have to mark
		 * this flush request as INFLIGHT for avoiding double
		 * account of this driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io.
	 */
	smp_wmb();
	refcount_set(&flush_rq->ref, 1);

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * Complete the DATA step of @rq's flush sequence under the flush
	 * lock, then restart the hw queue: finishing this request may
	 * allow other pending requests to be dispatched.
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

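/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * Called from the submission path for a request carrying REQ_PREFLUSH
 * and/or REQ_FUA.  Decomposes @rq into its flush sequence and either
 * completes it immediately, bypasses the flush machinery, or starts the
 * sequence.
 */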
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* save it for later */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

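/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */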
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queue hasn't flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

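/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock in order
 * to avoid false-positive lockdep reports.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same lock class
 * key by default.  A dedicated per-hctx key set here makes the nesting
 * visible to lockdep as safe.
 */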
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
				   struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);