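/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */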
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>
#include <linux/rh_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
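
/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value
 * there.
 */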
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/* returned when a REQ_NOWAIT bio would have blocked */
#define BLK_STS_AGAIN		((__force blk_status_t)12)
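
/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */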
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
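
/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a zone that is write-locked while a read to
 * the same zone could still be served.
 */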
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)
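
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried on failover path
 */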
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
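
/*
 * From most to least significant, the 64-bit bio_issue value packs:
 *  1 bit:   reserved for other usage, see below
 *  12 bits: original size of bio
 *  51 bits: issue time of bio
 */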
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
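
/*
 * Illustrative sketch (not part of the original header): with the layout
 * above, a value built by bio_issue_init() decomposes as
 *
 *   bit 63      : BIO_ISSUE_THROTL_SKIP_LATENCY, preserved across init
 *   bits 62..51 : original bio size, read back via bio_issue_size()
 *   bits 50..0  : low bits of ktime_get_ns(), read back via bio_issue_time()
 *
 * so timestamps wrap after 2^51 ns (roughly 26 days) and sizes above 4095
 * sectors are truncated by the "size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1"
 * mask.
 */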
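
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */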
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	RH_KABI_USE(1, u64 bi_iocost_cost)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
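
/*
 * bio flags
 */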
#define BIO_NO_PAGE_REF	0	/* don't put release vec pages */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final
				 * completion of this bio. */
#define BIO_QUEUE_ENTERED 11	/* can use blk_queue_enter_live() */
#define BIO_TRACKED 12		/* set if bio goes through the rq_qos path */
#define BIO_CGROUP_ACCT BIO_QUEUE_ENTERED /* has been accounted to a cgroup */
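
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */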
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)
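
/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */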
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
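
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */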
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;
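
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */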
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 8,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,		/* high priority; completion may be polled */

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */

	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * bio_issue_as_root_blkg() punt the actual issuing to a dedicated
	 * per-blkcg work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
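
/*
 * Illustrative sketch (not part of the original header): the op and its
 * flags share one word, e.g.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * after which bio_op(bio) == REQ_OP_WRITE and op_is_write(bio->bi_opf) is
 * true, because REQ_OP_WRITE has the least significant bit set.
 */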

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* op_is_write() is 0/1, matching STAT_READ/STAT_WRITE */
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
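
/*
 * Illustrative sketch (not part of the original header): a cookie of
 * (2 << BLK_QC_T_SHIFT) | 7 names tag 7 on hardware queue 2, so
 * blk_qc_t_to_queue_num() returns 2 and blk_qc_t_to_tag() returns 7;
 * setting BLK_QC_T_INTERNAL in addition marks the tag as an internal
 * (scheduler) tag rather than a driver tag.
 */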

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */