#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and normal IO completion will both attempt to "grab" the
 * request; the atomic flag makes sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
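
/*
 * Illustrative sketch (an assumption about a typical completion path,
 * not code from this file): both the timeout handler and normal
 * completion race to claim the request, and whoever sets
 * REQ_ATOM_COMPLETE first wins:
 *
 *	if (blk_mark_rq_complete(rq))
 *		return;		// someone else already claimed completion
 *	// ... complete the request ...
 */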

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		hash_hashed(&(rq)->hash)

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight and the flush isn't
		 * queueable in the drive, so hold the queue until the
		 * flush finishes.  Even without holding, the driver
		 * couldn't dispatch further requests and would just
		 * requeue them, and holding can improve throughput:
		 * given flush1, write1, flush2, once flush1 is
		 * dispatched the queue is held and write1 is not
		 * inserted.  After flush1 finishes, flush2 is dispatched
		 * against an already-clean disk cache and completes
		 * almost immediately, so flush2 is effectively folded
		 * into flush1.  While the queue is held, a flag marks
		 * that it must be restarted later; see flush_end_io()
		 * for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
		    !queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dying(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
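
/*
 * Note: __elv_next_request() is the dispatch helper behind
 * blk_peek_request() in blk-core.c; drivers consume requests through
 * that interface rather than calling this directly.
 */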

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
			   const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif
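
/*
 * Hedged sketch of how the fault-injection hook is consumed (modeled on
 * blk_complete_request() in blk-softirq.c; shown here for illustration
 * only): dropping the completion makes the request appear to time out.
 *
 *	if (unlikely(blk_should_fake_timeout(req->q)))
 *		return;		// drop the completion to fake a timeout
 *	if (!blk_mark_rq_complete(req))
 *		__blk_complete_request(req);
 */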

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

void __blk_run_queue_uncond(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
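
/*
 * Illustrative sketch (an assumption mirroring the request-list
 * accounting in blk-core.c): the allocator flags the queue congested
 * once the on-threshold is crossed and clears it when usage falls below
 * the off-threshold, e.g. roughly:
 *
 *	if (rl->count[sync] >= queue_congestion_on_threshold(q))
 *		blk_set_queue_congested(q, sync);
 *	else if (rl->count[sync] < queue_congestion_off_threshold(q))
 *		blk_clear_queue_congested(q, sync);
 */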

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) there are IO stats enabled for that disk, and
 *	c) it's a fs request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may still be %NULL if the
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
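
/*
 * Illustrative usage sketch (an assumption mirroring the request
 * allocation paths in blk-core.c): callers create the io_context up
 * front, then look up or create the per-queue icq:
 *
 *	create_io_context(gfp_mask, q->node);
 *	...
 *	icq = ioc_lookup_icq(current->io_context, q);
 */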

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
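
/*
 * Illustrative sketch (an assumption mirroring the submission checks in
 * blk-core.c): the bio submission path asks the throttler whether the
 * bio was queued for later dispatch, and stops if so:
 *
 *	if (blk_throtl_bio(q, bio))
 *		return;		// throttled, will be resubmitted later
 */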

#endif /* BLK_INTERNAL_H */