#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter is responsible for
	 * ensuring that that counter is globally visible before the queue
	 * is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
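/*
 * Illustrative usage sketch (not part of the kernel sources): a caller that
 * needs a queue reference around per-bio work pairs bio_queue_enter() with
 * blk_queue_exit().  do_process_bio() below is a hypothetical placeholder
 * for whatever the caller does while holding the reference.
 *
 *	static void submit_one_bio_example(struct bio *bio)
 *	{
 *		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 *
 *		if (bio_queue_enter(bio))
 *			return;		// enter failed; queue is draining/dying
 *		do_process_bio(bio);	// hypothetical per-bio work
 *		blk_queue_exit(q);
 *	}
 */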
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
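/*
 * Worked example of the boundary check above (illustrative numbers, not
 * from any particular device): with queue_segment_boundary() == 0xffff
 * (a 64 KiB segment boundary), vec1 at phys 0x1f000 with bv_len 0x1000 and
 * vec2 at phys 0x20000 with bv_len 0x1000 are physically contiguous, but
 *
 *	(0x1f000 | 0xffff)			== 0x1ffff
 *	((0x20000 + 0x1000 - 1) | 0xffff)	== 0x2ffff
 *
 * differ, so the merged segment would straddle the 64 KiB boundary and the
 * two vectors are not mergeable.
 */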
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
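/*
 * Illustrative example (assumed values, not from a real driver): with
 * queue_virt_boundary() == 0xfff (a 4 KiB virtual boundary), a previous
 * bvec with bv_offset 0 and bv_len 0x1000 ends exactly on the boundary,
 * so a following bvec starting at offset 0 leaves no gap.  If the next
 * bvec instead started at offset 0x200, (0x200 & 0xfff) is non-zero and
 * bvec_gap_to_prev() reports a gap.
 */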
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request will be a normal read/write request, so the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * loop below doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
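/*
 * Illustrative sketch of how a submission path might use this helper
 * (assumed caller structure, not a quote of blk-mq code): the split machinery
 * is only invoked when blk_may_split() says splitting might be needed.
 *
 *	unsigned int nr_segs = 1;
 *
 *	if (blk_may_split(q, bio))
 *		__blk_queue_split(q, &bio, &nr_segs);
 */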
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics only if the request had IO stats enabled when
 * it was started (RQF_IO_STAT) and it is not a passthrough request.
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size is
 * defined as 'unsigned int', and it has to be aligned to the logical block
 * size, which is the minimum accepted unit by hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
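/*
 * Example of the arithmetic above (assumed 4096-byte logical blocks, purely
 * illustrative): round_down(UINT_MAX, 4096) == 4294963200 bytes, and
 * 4294963200 >> 9 == 8388600, so a bio on such a queue is capped at 8388600
 * 512-byte sectors (just under 4 GiB) while remaining a whole number of
 * logical blocks.
 */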
/*
 * The max bio size which is aligned to q->limits.discard_granularity.  This
 * is a hint for splitting large discard bios in the generic block layer, so
 * that if a device driver needs to split a discard bio further, the smaller
 * bios can easily stay aligned to the discard_granularity of the device's
 * queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
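/*
 * Worked example (assumed discard_granularity of 1 MiB, illustrative only):
 * round_down(UINT_MAX, 1048576) == 4293918720 bytes, and
 * 4293918720 >> SECTOR_SHIFT == 8386560 sectors, i.e. 4095 MiB, which is
 * still an exact multiple of the 1 MiB granularity.
 */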
/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif

/*
 * Optimized request reference counting.  Requests use a plain atomic_t
 * instead of refcount_t for performance; the macro below provides the cheap
 * underflow/overflow sanity check used by req_ref_put_and_test().
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
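/*
 * Illustrative usage sketch (assumed caller, not a quote of blk-mq code):
 * a path that may race with request completion takes a speculative reference
 * and drops it again, freeing the request only if it held the last reference.
 * release_request() is a hypothetical stand-in for the final free step.
 *
 *	if (!req_ref_inc_not_zero(rq))
 *		return;			// rq is already on its way to being freed
 *	...inspect or time out rq...
 *	if (req_ref_put_and_test(rq))
 *		release_request(rq);	// hypothetical final-free step
 */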
#endif /* BLK_INTERNAL_H */