#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,		/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 * If you modify this structure, make sure to update blk_rq_init() in
 * block/blk-core.c as well.
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;		/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, we reclaim the
	 * memory when the request is completed. We cannot reuse the rb_node
	 * inside the completion_data below.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.  Some peculiar devices, like the IDE tape drives, treat
 * suspend and resume completely differently.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
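
/*
 * Editor's illustrative sketch (not part of the original header): the
 * locked helpers above must run with ->queue_lock held.  A driver toggling
 * merge behaviour might do:
 *
 *	spin_lock_irq(q->queue_lock);
 *	if (disable_merges)			(hypothetical driver flag)
 *		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	else
 *		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants skip the lockdep assertion and are only safe
 * before the queue is visible to other contexts.
 */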

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
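
/*
 * Editor's illustrative sketch (not part of the original header): walking
 * every bio_vec of a request, e.g. to process the data page by page:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *page_addr = kmap_atomic(bvec->bv_page);
 *		... process bvec->bv_len bytes at page_addr + bvec->bv_offset ...
 *		kunmap_atomic(page_addr);
 *	}
 */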

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
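
/*
 * Editor's illustrative sketch (not part of the original header): issuing a
 * driver-private command synchronously through the queue:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	rq->special = my_cmd;			(hypothetical payload)
 *	err = blk_execute_rq(q, NULL, rq, 0);	(waits for completion)
 *	blk_put_request(rq);
 *
 * blk_execute_rq_nowait() is the asynchronous variant; its rq_end_io_fn is
 * invoked when the request completes.
 */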

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
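
/*
 * Editor's illustrative sketch (not part of the original header): a driver
 * that programs hardware per request typically reads the start sector and
 * size once at dispatch time:
 *
 *	sector_t start = blk_rq_pos(rq);		(in 512-byte sectors)
 *	unsigned int nsect = blk_rq_sectors(rq);	(blk_rq_bytes(rq) >> 9)
 *
 * For example, a 4 KiB request at byte offset 1 MiB yields start == 2048
 * and nsect == 8.
 */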

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
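
/*
 * Editor's illustrative sketch (not part of the original header): a common
 * driver completion flow, with the queue lock held:
 *
 *	spin_lock(q->queue_lock);
 *	rq = blk_fetch_request(q);		(dequeue + start)
 *	... hand rq to the hardware, later in the completion path ...
 *	__blk_end_request_all(rq, error);	(error is 0 or a negative errno)
 *	spin_unlock(q->queue_lock);
 *
 * Outside the lock, blk_end_request_all() performs the same completion.
 */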

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
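
/*
 * Editor's illustrative sketch (not part of the original header): a driver's
 * probe path typically configures queue limits right after allocating the
 * queue (my_request_fn and my_lock are hypothetical):
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 */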

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug
 * list or when attempting a merge, because blk_schedule_flush_plug() will
 * only flush the plug list when the task sleeps by itself.
 */
struct blk_plug {
	unsigned long magic;		/* detects uninitialized use */
	struct list_head list;		/* requests */
	struct list_head cb_list;	/* md requires an unplug callback */
	unsigned int should_sort;	/* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
	struct list_head list;
	void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
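
/*
 * Editor's illustrative sketch (not part of the original header): batching
 * several submissions so the block layer can merge and dispatch them
 * together (bios[] and nr are hypothetical):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(rw, bios[i]);
 *	blk_finish_plug(&plug);
 *
 * The queued requests are flushed to the device when blk_finish_plug()
 * runs, or earlier if the task sleeps.
 */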

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
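
/*
 * Editor's illustrative sketch (not part of the original header): a tagged
 * driver maps a hardware completion back to its request via the tag
 * (the hardware access is hypothetical):
 *
 *	tag = readl(hw_status_reg) & TAG_MASK;
 *	rq = blk_queue_find_tag(q, tag);
 *	if (rq) {
 *		blk_queue_end_tag(q, rq);	(queue lock held)
 *		__blk_end_request_all(rq, 0);
 *	}
 */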

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}
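
/*
 * Editor's illustrative sketch (not part of the original header): discarding
 * a 1 MiB region (2048 sectors of 512 bytes) starting at sector 2048,
 * requesting a secure discard when the device supports it:
 *
 *	unsigned long flags = blk_queue_secdiscard(q) ?
 *				BLKDEV_DISCARD_SECURE : 0;
 *
 *	err = blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, flags);
 */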

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
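
/*
 * Editor's worked example for queue_limit_alignment_offset() (illustrative,
 * not part of the original header): with a 4096-byte physical block size,
 * io_min no larger, and alignment_offset == 0, granularity is 4096.  For
 * sector 7, the byte offset is 7 << 9 == 3584, so the helper returns
 * (4096 + 0 - 3584) & 4095 == 512: the padding needed to reach the next
 * naturally aligned boundary.
 */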

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
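
/*
 * Editor's note (illustrative, not part of the original header):
 * blksize_bits() returns the log2 of a block size, e.g.
 * blksize_bits(512) == 9 and blksize_bits(4096) == 12.
 */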

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up and until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
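
/*
 * Editor's illustrative expansion (not part of the original header): a
 * driver claiming block major 2, any minor, would write
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(2);
 *
 * which expands to MODULE_ALIAS("block-major-2-*").
 */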

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * Stubs for when CONFIG_BLOCK is disabled.
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif