#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

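/* Max future timer expiry for timeouts */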
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

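/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */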
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

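/*
 * Internal elevator interface
 */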
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

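/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */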
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

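/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */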
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

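/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer so
 * that the resulting bios stay aligned to the device's discard granularity.
 */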
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

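/*
 * Internal io_context interface
 */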
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

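/*
 * Internal throttling interface
 */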
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_hipri(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_HIPRI;
}

extern const struct address_space_operations def_blk_aops;

#endif /* BLK_INTERNAL_H */