/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG (1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
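
/*
 * Usage sketch (illustrative, not part of this header): these flags are
 * OR'd into the suspend_flags argument of dm_suspend(), declared in
 * <linux/device-mapper.h>, e.g.
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *
 * to freeze the filesystem on the device before suspending it.
 */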

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG (1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
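
/*
 * Usage sketch (illustrative only): callers that resolve a target for a
 * sector are expected to validate the lookup result before dereferencing
 * it, roughly:
 *
 *	struct dm_target *ti = dm_table_find_target(map, sector);
 *
 *	if (!dm_target_is_valid(ti))
 *		return -EIO;
 */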

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);
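
/*
 * Usage sketch (illustrative only): dm_split_args() tokenizes the mutable
 * input string in place and allocates the argv array, which the caller is
 * responsible for freeing, roughly:
 *
 *	int argc;
 *	char **argv;
 *	int r = dm_split_args(&argc, &argv, input);
 *
 *	if (!r) {
 *		... use argc/argv ...
 *		kfree(argv);
 *	}
 */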

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
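
/*
 * Usage sketch (illustrative only, assuming the caller already holds a
 * reference on md): the noflush variant pairs with dm_internal_resume()
 * to quiesce I/O around a short critical section, roughly:
 *
 *	dm_internal_suspend_noflush(md);
 *	... perform work that requires quiesced I/O ...
 *	dm_internal_resume(md);
 */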

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);


/*-----------------------------------------------------------------
 * The device-mapper ioctl interface.
 *---------------------------------------------------------------*/
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * Release callback for the mapped device's kobject.
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);
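
/*
 * Usage sketch (illustrative only): dm_get_table_device() and
 * dm_put_table_device() are paired; a successful get must eventually be
 * matched by a put, roughly:
 *
 *	struct dm_dev *dd;
 *
 *	if (!dm_get_table_device(md, dev, mode, &dd)) {
 *		... use dd->bdev ...
 *		dm_put_table_device(md, dd);
 *	}
 */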

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif