/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef BLKTRACE_H
3#define BLKTRACE_H
4
5#include <linux/blk-mq.h>
6#include <linux/relay.h>
7#include <linux/compat.h>
8#include <uapi/linux/blktrace_api.h>
9#include <linux/list.h>
10
11#if defined(CONFIG_BLK_DEV_IO_TRACE)
12
13#include <linux/sysfs.h>
14
/*
 * Per-queue blktrace state.  One instance hangs off a request_queue while
 * tracing is configured for it; teardown goes through blk_trace_remove().
 * Field order is shared with the tracing core — do not reorder.
 */
struct blk_trace {
	int trace_state;			/* setup/running/stopped state */
	struct rchan *rchan;			/* relay channel carrying trace records */
	unsigned long __percpu *sequence;	/* per-cpu event sequence counters */
	unsigned char __percpu *msg_data;	/* per-cpu buffer for trace messages */
	u16 act_mask;				/* BLK_TC_* mask of actions to trace */
	u64 start_lba;				/* only trace sectors >= this LBA */
	u64 end_lba;				/* only trace sectors < this LBA */
	u32 pid;				/* restrict tracing to this pid (0 = all) */
	u32 dev;				/* device being traced */
	struct dentry *dir;			/* debugfs directory for this trace */
	struct list_head running_list;		/* link on the global running-traces list */
	atomic_t dropped;			/* events lost due to full buffers */
};
29
struct blkcg;

/* BLKTRACESETUP/START/STOP/TEARDOWN ioctl entry point. */
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
/* Tear down any trace still attached when the queue goes away. */
extern void blk_trace_shutdown(struct request_queue *);
/* Emit a free-form note into the trace stream; @blkcg may be NULL. */
extern __printf(3, 4)
void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
36
37
38
39
40
41
42
43
44
45
46
47
48
49
/*
 * Record a printf-style note in @q's trace on behalf of cgroup @cg.
 * The queue's blk_trace pointer is RCU-protected, so the lookup and the
 * message emission happen under rcu_read_lock(); unlikely() reflects
 * that tracing is normally disabled.
 */
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...)			\
	do {								\
		struct blk_trace *bt;					\
									\
		rcu_read_lock();					\
		bt = rcu_dereference((q)->blk_trace);			\
		if (unlikely(bt))					\
			__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
		rcu_read_unlock();					\
	} while (0)
/* Same as above but with no cgroup attribution. */
#define blk_add_trace_msg(q, fmt, ...)					\
	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
/* Upper bound on the formatted length of a single trace note. */
#define BLK_TN_MAX_MSG		128
63
64static inline bool blk_trace_note_message_enabled(struct request_queue *q)
65{
66 struct blk_trace *bt;
67 bool ret;
68
69 rcu_read_lock();
70 bt = rcu_dereference(q->blk_trace);
71 ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
72 rcu_read_unlock();
73 return ret;
74}
75
/* Attach @len bytes of driver-private data to the trace record for @rq. */
extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
/* Set up tracing on @q from a userspace blk_user_trace_setup in @arg. */
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   struct block_device *bdev,
			   char __user *arg);
/* Start (@start != 0) or stop tracing on @q. */
extern int blk_trace_startstop(struct request_queue *q, int start);
/* Tear down and free any trace attached to @q. */
extern int blk_trace_remove(struct request_queue *q);
/* Add/remove the sysfs trace attributes for a block device. */
extern void blk_trace_remove_sysfs(struct device *dev);
extern int blk_trace_init_sysfs(struct device *dev);

/* sysfs attribute group exposing the trace/* tunables. */
extern struct attribute_group blk_trace_attr_group;
86
#else /* !CONFIG_BLK_DEV_IO_TRACE */
/*
 * Stubs for kernels built without blktrace: setup/control entry points
 * report -ENOTTY, everything else compiles away to nothing.
 */
# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
# define blk_trace_shutdown(q)				do { } while (0)
# define blk_add_driver_data(rq, data, len)		do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
# define blk_trace_startstop(q, start)			(-ENOTTY)
# define blk_trace_remove(q)				(-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
# define blk_trace_remove_sysfs(dev)			do { } while (0)
# define blk_trace_note_message_enabled(q)		(false)
/* Inline (not a macro) so the return value keeps working for callers. */
static inline int blk_trace_init_sysfs(struct device *dev)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */
104
#ifdef CONFIG_COMPAT

/*
 * 32-bit layout of struct blk_user_trace_setup, used to service the
 * BLKTRACESETUP ioctl from compat (32-bit) userspace.  Field order and
 * sizes are userspace ABI — do not change.
 */
struct compat_blk_user_trace_setup {
	char name[BLKTRACE_BDEV_SIZE];
	u16 act_mask;
	u32 buf_size;
	u32 buf_nr;
	compat_u64 start_lba;
	compat_u64 end_lba;
	u32 pid;
};
/* Compat ioctl number matching the 32-bit structure above. */
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)

#endif /* CONFIG_COMPAT */
119
/* Render the REQ_OP/flag bits in @op as an "RWBS" string into @rwbs. */
void blk_fill_rwbs(char *rwbs, unsigned int op);
121
122static inline sector_t blk_rq_trace_sector(struct request *rq)
123{
124
125
126
127
128 if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
129 return 0;
130 return blk_rq_pos(rq);
131}
132
/*
 * Sector count to report in a trace event for @rq; passthrough
 * requests carry no meaningful sector count and report 0.
 */
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return 0;

	return blk_rq_sectors(rq);
}
137
138#endif
139