#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

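/*
 * Number of bio_vecs embedded inline in struct bio (bi_inline_vecs); bios
 * that need more vectors get them from a bvec mempool via bvec_alloc().
 */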
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

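/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */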
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

void blk_integrity_add(struct gendisk *);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline void blk_integrity_add(struct gendisk *disk)
{
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

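/*
 * Internal elevator interface
 */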
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

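/* sysfs show/store helpers backing the partition and disk attributes */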
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

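/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */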
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

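/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */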
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

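/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for splitting a large discard bio in the block layer: keeping
 * the split size aligned makes it likely that any further splits done by
 * the driver also stay aligned to the device's discard granularity.
 */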
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

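/*
 * Internal io_context interface
 */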
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
		gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

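/*
 * Internal throttling interface
 */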
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
bool blk_throtl_bio(struct bio *bio);
#else
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

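/*
 * Bounce buffering can only be needed when highmem pages exist (max_pfn
 * above max_low_pfn) and the device asked for BLK_BOUNCE_HIGH.
 */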
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

void disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

#endif /* BLK_INTERNAL_H */