1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _MD_MD_H
16#define _MD_MD_H
17
18#include <linux/blkdev.h>
19#include <linux/kobject.h>
20#include <linux/list.h>
21#include <linux/mm.h>
22#include <linux/mutex.h>
23#include <linux/timer.h>
24#include <linux/wait.h>
25#include <linux/workqueue.h>
26
/* Largest possible sector_t value: used as an "unbounded"/invalid sector. */
#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;		/* one md array */
typedef struct mdk_rdev_s mdk_rdev_t;	/* one component device of an array */
31
32
33
34
/*
 * MD's per-component-device structure: one instance for each block
 * device that is (or may become) a member of an md array.
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set:
					 * linked on mddev->disks (see the
					 * rdev_for_each* iterators below) */

	sector_t sectors;		/* device size usable for the array,
					 * in 512-byte sectors */
	mddev_t *mddev;			/* RAID array this device belongs to */
	int last_events;		/* I/O event count last seen; used to
					 * judge device activity - TODO
					 * confirm against md.c */

	struct block_device *bdev;	/* the underlying block device */

	struct page *sb_page;		/* cached copy of the on-disk superblock */
	int sb_loaded;			/* non-zero once sb_page holds valid data */
	__u64 sb_events;		/* event counter read from the superblock */
	sector_t data_offset;		/* start of array data on this device */
	sector_t sb_start;		/* sector offset of this device's superblock */
	int sb_size;			/* superblock size in bytes */
	int preferred_minor;		/* preferred array minor recorded in sb */

	struct kobject kobj;		/* sysfs object for this rdev */

	/*
	 * Bit numbers for ->flags, used with test_bit()/set_bit()
	 * (these are bit positions, not masks; bits 0 and 3 are unused).
	 */
	unsigned long flags;
#define Faulty 1		/* device is known to have a fault */
#define In_sync 2		/* device is in sync with the rest of the array */
#define WriteMostly 4		/* avoid reading from this device when
				 * possible - TODO confirm semantics */
#define BarriersNotsupp 5	/* barrier requests are not supported */
#define AllReserved 6		/* whole device reserved for one array -
				 * presumably; verify in md.c */

#define AutoDetected 7		/* device was added by in-kernel auto-detect */
#define Blocked 8		/* writes are held off until this clears;
				 * waiters sleep on blocked_wait below */

#define StateChanged 9		/* state changed; metadata may need a
				 * sysfs/superblock update - TODO confirm */

	/* woken when Blocked is cleared (see md_wait_for_blocked_rdev()) */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of this device in the array */
	int saved_raid_disk;		/* role the device had before a restart;
					 * may allow recovery to be avoided -
					 * TODO confirm */

	sector_t recovery_offset;	/* during recovery, data before this
					 * sector is known good - presumably;
					 * verify against md.c */

	/* number of in-flight requests to this device; dropped via
	 * rdev_dec_pending() */
	atomic_t nr_pending;

	/* read errors seen on this device (not yet corrected) */
	atomic_t read_errors;

	/* read errors that were subsequently corrected (e.g. by rewrite) */
	atomic_t corrected_errors;

	/* deferred teardown of this rdev (runs from a workqueue) */
	struct work_struct del_work;

	/* cached handle for this rdev's 'state' sysfs file */
	struct sysfs_dirent *sysfs_state;

};
109
/*
 * One md array ("mddev").  Fields group roughly into: identity and
 * geometry, superblock/metadata state, resync/recovery progress and
 * throttling, locking/refcounting, safemode, and bitmap support.
 */
struct mddev_s
{
	void *private;			/* personality-private data */
	struct mdk_personality *pers;	/* active personality, NULL when none */
	dev_t unit;			/* device number of the array */
	int md_minor;			/* array minor number */
	struct list_head disks;		/* component rdevs (via same_set) */
	unsigned long flags;		/* MD_CHANGE_* bits: */
#define MD_CHANGE_DEVS 0	/* device membership/state changed; sb needs
				 * rewriting - TODO confirm against md.c */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' state */
#define MD_CHANGE_PENDING 2	/* superblock update is in flight -
				 * presumably; verify */

	int suspended;			/* array I/O is suspended */
	atomic_t active_io;		/* in-flight requests; drained when
					 * suspending */
	int ro;				/* non-zero => read-only (exact values
					 * per md.c - TODO confirm) */

	struct gendisk *gendisk;	/* the mdX gendisk; may be NULL (see
					 * mdname()) */

	struct kobject kobj;		/* sysfs object of the array */
	int hold_active;		/* keep array object allocated until: */
#define UNTIL_IOCTL 1
#define UNTIL_STOP 2

	/* on-disk metadata format version */
	int major_version,
	 minor_version,
	 patch_version;
	int persistent;			/* superblocks are stored on disk */
	int external;			/* metadata is managed externally
					 * (by userspace) */

	char metadata_type[17];		/* externally managed metadata type name */
	int chunk_sectors;		/* stripe chunk size, in sectors */
	time_t ctime, utime;		/* creation / last-update times */
	int level, layout;		/* RAID level and layout */
	char clevel[16];		/* level as a string, e.g. "raid5" */
	int raid_disks;			/* number of active member slots */
	int max_disks;			/* maximum members the metadata allows */
	sector_t dev_sectors;		/* size used from each component device */

	sector_t array_sectors;		/* exported size of the array */
	int external_size;		/* array size is managed externally,
					 * not derived by the personality */

	__u64 events;			/* superblock event counter */

	char uuid[16];			/* array identity */

	/*
	 * If reshape_position != MaxSector a reshape is in progress and
	 * the new geometry below is taking effect - presumably; verify
	 * against md.c.
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;	/* target geometry */
	int new_chunk_sectors;			/* target chunk size */

	struct mdk_thread_s *thread;		/* management thread */
	struct mdk_thread_s *sync_thread;	/* resync/recovery thread */
	sector_t curr_resync;		/* last sector scheduled for resync */

	/*
	 * curr_resync_completed counts sectors both scheduled and
	 * confirmed complete; updated less frequently than curr_resync -
	 * TODO confirm.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent timestamp (jiffies) for
					 * speed calculation */
	sector_t resync_mark_cnt;	/* sectors done at resync_mark */
	sector_t curr_mark_cnt;		/* sectors scheduled now */

	sector_t resync_max_sectors;	/* total sectors to resync; may exceed
					 * dev_sectors */

	sector_t resync_mismatches;	/* sectors found inconsistent during
					 * a check/repair pass */

	/* userspace-requested window in which array I/O is suspended */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* resync speed throttling, KB/sec; 0 => use system-wide default */
	int sync_speed_min;
	int sync_speed_max;

	/* resync even when other arrays on shared devices are resyncing */
	int parallel_resync;

	int ok_start_degraded;		/* allow starting a degraded array */

	/*
	 * MD_RECOVERY_* bit numbers for ->recovery below, driving the
	 * resync/recovery/reshape state machine in md_check_recovery()
	 * and md_do_sync().
	 */
#define MD_RECOVERY_RUNNING 0	/* a sync thread is running or about to */
#define MD_RECOVERY_SYNC 1	/* doing a resync, not a recovery */
#define MD_RECOVERY_RECOVER 2	/* recovering a spare - TODO confirm */
#define MD_RECOVERY_INTR 3	/* interrupt the sync thread */
#define MD_RECOVERY_DONE 4	/* thread finished; cleanup needed */
#define MD_RECOVERY_NEEDED 5	/* check whether recovery should start */
#define MD_RECOVERY_REQUESTED 6	/* user requested a sync pass */
#define MD_RECOVERY_CHECK 7	/* check only, do not repair */
#define MD_RECOVERY_RESHAPE 8	/* a reshape is happening */
#define MD_RECOVERY_FROZEN 9	/* recovery is frozen by the user */

	unsigned long recovery;		/* MD_RECOVERY_* bits */
	int recovery_disabled;		/* recovery is not possible (e.g. too
					 * many failures) - TODO confirm */

	int in_sync;			/* array is known clean ('sync' not
					 * needed for the superblock) */

	struct mutex open_mutex;	/* serializes open/close vs stop */
	struct mutex reconfig_mutex;	/* protects reconfiguration of the
					 * array (the main md lock) */
	atomic_t active;		/* general refcount on the mddev */
	atomic_t openers;		/* how many times the device is open */

	int changed;			/* media-changed flag for open() */
	int degraded;			/* array has fewer working devices
					 * than it should */

	int barriers_work;		/* barriers are believed to work on
					 * the component devices */

	/* bios queued for retry after a barrier failure - presumably;
	 * verify against md.c */
	struct bio *biolist;

	atomic_t recovery_active;	/* sync blocks scheduled but not done */
	wait_queue_head_t recovery_wait;/* woken as recovery_active drains */
	sector_t recovery_cp;		/* where resync restarts after a crash */
	sector_t resync_min;		/* user requested resync to start here */

	sector_t resync_max;		/* resync pauses when reaching here */

	/* cached handle for the 'array_state' sysfs file */
	struct sysfs_dirent *sysfs_state;

	/* cached handle for the 'sync_action' sysfs file */
	struct sysfs_dirent *sysfs_action;

	struct work_struct del_work;	/* deferred teardown of the array */

	spinlock_t write_lock;		/* protects superblock-write state */
	wait_queue_head_t sb_wait;	/* woken when superblock writes finish */
	atomic_t pending_writes;	/* superblock writes in flight */

	unsigned int safemode;		/* if set, mark the array 'clean' in
					 * the superblock whenever there are
					 * no pending writes */
	unsigned int safemode_delay;	/* how long to wait after a write
					 * before entering safemode */
	struct timer_list safemode_timer;
	atomic_t writes_pending;	/* outstanding array writes (see
					 * md_write_start()/md_write_end()) */
	struct request_queue *queue;	/* for plugging etc. */

	atomic_t write_behind;		/* outstanding write-behind I/Os */
	unsigned int max_write_behind;	/* 0 = disabled */

	struct bitmap *bitmap;		/* write-intent bitmap, if any */
	struct file *bitmap_file;	/* backing file for an external bitmap */
	long bitmap_offset;		/* bitmap offset from superblock start,
					 * in sectors; may be negative -
					 * TODO confirm units */

	long default_bitmap_offset;	/* offset to use when hot-adding a
					 * bitmap */

	struct list_head all_mddevs;	/* entry in the global list of arrays */
};
295
296
297static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
298{
299 int faulty = test_bit(Faulty, &rdev->flags);
300 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
301 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
302}
303
304static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
305{
306 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
307}
308
/*
 * Operations implemented by each RAID "personality" (raid0, raid1,
 * raid5, ...).  Registered via register_md_personality().
 */
struct mdk_personality
{
	char *name;			/* e.g. "raid1" */
	int level;			/* RAID level implemented */
	struct list_head list;		/* entry in the global personality list */
	struct module *owner;
	/* handle one incoming bio for the array */
	int (*make_request)(struct request_queue *q, struct bio *bio);
	int (*run)(mddev_t *mddev);	/* start the array */
	int (*stop)(mddev_t *mddev);	/* stop the array */
	/* emit status for /proc/mdstat */
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/*
	 * Report that an error was detected on @rdev so the personality
	 * can fail the device / go degraded.
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	/* activate spares that have finished recovery; returns count -
	 * TODO confirm return semantics */
	int (*spare_active) (mddev_t *mddev);
	/* perform one chunk of resync/recovery starting at sector_nr */
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	/* array size for the given per-device size and disk count
	 * (0 means "use current values" - presumably; verify) */
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/*
	 * quiesce: state 1 suspends new I/O and waits for pending I/O
	 * to finish; state 0 resumes - presumably; verify against the
	 * callers in md.c.
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/*
	 * takeover: transition the array to this personality; returns
	 * new private data on success - TODO confirm error convention
	 * (likely ERR_PTR).
	 */
	void *(*takeover) (mddev_t *mddev);
};
349
350
/*
 * A sysfs attribute of an mddev; show/store receive the mddev resolved
 * from the containing kobject.  'attr' stays first so generic sysfs
 * code can recover the entry from the attribute pointer.
 */
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
356
357
358static inline char * mdname (mddev_t * mddev)
359{
360 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
361}
362
363
364
365
366
/*
 * Iterate over the rdevs on an arbitrary list head; safe against
 * removal of the current entry ('tmp' is the spare cursor).
 */
#define rdev_for_each_list(rdev, tmp, head) \
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * Iterate over all rdevs of an array; safe against removal of the
 * current entry.
 */
#define rdev_for_each(rdev, tmp, mddev) \
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

/* RCU-protected iteration; caller must hold rcu_read_lock(). */
#define rdev_for_each_rcu(rdev, mddev) \
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
378
/*
 * A per-array kernel thread; created with md_register_thread() and
 * poked with md_wakeup_thread().
 */
typedef struct mdk_thread_s {
	void (*run) (mddev_t *mddev);	/* work function */
	mddev_t *mddev;			/* array the thread serves */
	wait_queue_head_t wqueue;	/* where the thread sleeps */
	unsigned long flags;		/* THREAD_* bits below */
	struct task_struct *tsk;	/* underlying kernel task */
	unsigned long timeout;		/* max sleep before running anyway -
					 * TODO confirm against md.c */
} mdk_thread_t;

#define THREAD_WAKEUP 0		/* bit in ->flags: work is pending */
389
/*
 * Sleep on @wq until @condition is true.  @lock is an irq-disabling
 * spinlock held by the caller; it is dropped around schedule() (with
 * @cmd run each time just after unlocking) and re-acquired before the
 * condition is re-tested.  The lock is held again on return.
 *
 * The wait is TASK_UNINTERRUPTIBLE, so signals are ignored.
 *
 * Fix: parenthesize the macro parameters where they are used as
 * operands of '&', per standard macro hygiene; expansion is otherwise
 * unchanged.
 *
 * NOTE(review): the direct 'current->state = TASK_RUNNING' assignment
 * mirrors the original; __set_current_state() would be the more usual
 * idiom - confirm before changing.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&(wq), &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&(lock));				\
		cmd;							\
		schedule();						\
		spin_lock_irq(&(lock));					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&(wq), &__wait);				\
} while (0)
408
/*
 * As __wait_event_lock_irq(), but skips the waitqueue setup entirely
 * when @condition is already true.  @lock must be held (irqs off) on
 * entry and is held again on return.
 */
#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
415
/* put_page() that tolerates a NULL page pointer. */
static inline void safe_put_page(struct page *p)
{
	if (!p)
		return;
	put_page(p);
}
420
/* Personality registration. */
extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);

/* md kernel-thread management (see mdk_thread_t above). */
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);

/* Write accounting and sync/recovery notification (md.c). */
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);

/* Superblock and synchronous page I/O helpers. */
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
			struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);

/* Data-integrity (DIF/DIX) and bitmap sanity helpers. */
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
447
448#endif
449