/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried on failover path
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* anything else could be a path failure, so should be retried */
	return true;
}
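
/*
 * Example (illustrative sketch, not part of the kernel API; the retry
 * helper named below is hypothetical): a multipath-aware completion
 * handler could use blk_path_error() to decide whether a failed request
 * is worth retrying on another path.
 *
 *	static void example_end_request(struct request *rq, blk_status_t status)
 *	{
 *		if (status != BLK_STS_OK && blk_path_error(status)) {
 *			example_retry_on_other_path(rq);	// hypothetical
 *			return;
 *		}
 *		blk_mq_end_request(rq, status);
 *	}
 */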

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
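
/*
 * Example (illustrative sketch): bio_issue packs three fields into one
 * u64. Bit 63 is reserved (e.g. BIO_ISSUE_THROTL_SKIP_LATENCY), bits
 * 62..51 hold the low 12 bits of the original size, and bits 50..0 hold
 * the low 51 bits of the issue time.
 *
 *	struct bio_issue issue = { .value = 0 };
 *
 *	bio_issue_init(&issue, 8);	// stamp "now", record size 8
 *	bio_issue_time(&issue);		// ktime_get_ns() masked to 51 bits
 *	bio_issue_size(&issue);		// 8; only the low 12 bits are kept
 */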

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by
	 * bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final
				 * completion of this bio. */
#define BIO_QUEUE_ENTERED 11	/* can use blk_queue_enter_live() */

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
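
/*
 * Example (illustrative sketch): bi_flags is an unsigned short, so with
 * BVEC_POOL_OFFSET == 13 the top three bits carry the pool index plus one
 * while the lower bits remain available for the BIO_* flags above.
 *
 *	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;	// record pool idx
 *	BVEC_POOL_IDX(bio);				// reads back idx + 1
 */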

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
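
/*
 * Example (illustrative sketch): how the helpers above decompose an
 * opf value.
 *
 *	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *	op_is_write(opf);	// true, REQ_OP_WRITE has the LSB set
 *	op_is_flush(opf);	// true, REQ_FUA is set
 *	op_is_sync(opf);	// true, REQ_SYNC is set
 *	op_stat_group(opf);	// STAT_WRITE
 */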

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
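
/*
 * Example (illustrative sketch): round-tripping a polling cookie through
 * the helpers above.
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, false);
 *
 *	blk_qc_t_valid(cookie);		// true
 *	blk_qc_t_to_tag(cookie);	// 42
 *	blk_qc_t_to_queue_num(cookie);	// 3
 *	blk_qc_t_is_internal(cookie);	// false
 */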

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */