1
2
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/blk-mq.h>
9#include <linux/sched/sysctl.h>
10
11#include "blk.h"
12#include "blk-mq-sched.h"
13
14
15
16
17
18
19static void blk_end_sync_rq(struct request *rq, blk_status_t error)
20{
21 struct completion *waiting = rq->end_io_data;
22
23 rq->end_io_data = NULL;
24
25
26
27
28
29 complete(waiting);
30}
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared passthrough request at the back (or front,
 *    for @at_head) of the I/O scheduler queue for execution.  Does not
 *    wait for completion.
 *
 * Note:
 *    If the queue is dying, the request is ended immediately with
 *    BLK_STS_IOERR, which invokes @done via the normal end-io path.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* Sleeping insertion paths below require interrupts enabled. */
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * Multiqueue path: the scheduler insert handles queue state itself,
	 * so no dying check is done here on this path.
	 */
	if (q->mq_ops) {
		blk_mq_sched_insert_request(rq, at_head, true, false);
		return;
	}

	/* Legacy path: queue state and elevator are protected by queue_lock. */
	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* Suppress error logging; fail the request immediately. */
		rq->rq_flags |= RQF_QUIET;
		__blk_end_request_all(rq, BLK_STS_IOERR);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	/* Kick the queue so the freshly inserted request gets dispatched. */
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
82
83
84
85
86
87
88
89
90
91
92
93
94void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
95 struct request *rq, int at_head)
96{
97 DECLARE_COMPLETION_ONSTACK(wait);
98 unsigned long hang_check;
99
100 rq->end_io_data = &wait;
101 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
102
103
104 hang_check = sysctl_hung_task_timeout_secs;
105 if (hang_check)
106 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
107 else
108 wait_for_completion_io(&wait);
109}
110EXPORT_SYMBOL(blk_execute_rq);
111