/*
 * md.h : kernel internal structure of the Linux MD (software RAID) driver
 */
#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/*
 * Bad-block records are stored sorted in a single page, 8 bytes each,
 * so at most PAGE_SIZE/8 entries fit in the log.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)
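
/*
 * MD's 'extended' device: one member device of an md array.
 */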
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same raid set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, this device keeps its metadata
	 * (superblock, bitmap, bad-block log) on a separate device
	 * rather than on the data device (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;
	int sb_loaded;
	__u64 sb_events;
	sector_t data_offset;		/* start of data in device */
	sector_t sb_start;		/* offset of the superblock (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;
	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not in sync with array:
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1; this reduces the
	 * burden of testing multiple flags in many cases.
	 */
	unsigned long flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	AutoDetected	7		/* added by auto-detect code */
#define	Blocked		8		/* An error occurred but has not yet
					 * been acknowledged by the metadata
					 * handler, so don't allow writes
					 * until it is cleared */
#define	WriteErrorSeen	9		/* A write error has been seen on this
					 * device
					 */
#define	FaultRecorded	10		/* Intermediate state for clearing
					 * Blocked.  The fault is/will be
					 * recorded in the metadata, but that
					 * metadata hasn't been stored safely
					 * on disk yet.
					 */
#define	BlockedBadBlocks 11		/* A writer is blocked because they
					 * found an unacknowledged bad-block.
					 * This can safely be cleared at any
					 * time, and the writer will re-check.
					 */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t recovery_offset;	/* If this device has been partially
					 * recovered, this is where we were up to.
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* time of last read error */

	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in the superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state;	/* handle for 'state' sysfs entry */

	struct badblocks {
		int count;		/* count of bad blocks */
		int unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none.
					 */
		int shift;		/* shift from sectors to block size;
					 * a negative shift means badblocks
					 * are disabled.
					 */
		u64 *page;		/* badblock list */
		int changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};

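/*
 * Each bad-block record is a single u64: the top bit is an 'acknowledged'
 * flag (the bad block has been safely recorded in the on-disk metadata),
 * the next 54 bits are the start sector, and the low 9 bits hold the
 * extent length minus one (1-512 sectors per record).
 */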
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))

extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
			      int acknowledged);
extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
extern void md_ack_all_badblocks(struct badblocks *bb);

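/*
 * An mddev_s structure describes one md array: its member devices, the
 * metadata format in use, and the state of any resync/recovery/reshape.
 */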
struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active;	/* set when sysfs deletes
							 * are happening, so run/
							 * takeover/stop are not safe
							 */
	int				ready;		/* see when safe to pass
							 * IO requests down */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	atomic_t			plug_cnt;	/* If device is expecting
							 * more bios soon.
							 */

	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;

	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	/* allow starting a degraded array that is not clean (start_dirty_degraded) */
	int				ok_start_degraded;
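
	/* recovery/resync flags
	 * NEEDED:    we might need to start a resync/recover
	 * RUNNING:   a thread is running, or about to be started
	 * SYNC:      actual sync, not recovery
	 * RECOVER:   doing recovery, or need to try it
	 * INTR:      resync needs to be aborted for some reason
	 * DONE:      thread is done and is waiting to be reaped
	 * REQUESTED: user-space has requested a sync (used with SYNC)
	 * CHECK:     user-space request for check-only, no repair
	 * RESHAPE:   a reshape is happening
	 * FROZEN:    sync/recovery is deliberately frozen
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */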
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct sysfs_dirent		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct		del_work;	/* used for delayed sysfs removal */

	spinlock_t			write_lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;		/* for plugging ... */

	struct bitmap			*bitmap;	/* the bitmap for the device */
	struct {
		struct file		*file;		/* the bitmap file */
		loff_t			offset;		/* offset from superblock of
							 * start of bitmap. May be
							 * negative, but not '0'.
							 * For external metadata, offset
							 * from start of device.
							 */
		loff_t			default_offset;	/* the offset to use when
							 * hot-adding a bitmap.
							 */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep;	/* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio			*flush_bio;
	atomic_t			flush_pending;
	struct work_struct		flush_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
	void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states:
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char *mdname(mddev_t *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
}

static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	sysfs_remove_link(&mddev->kobj, nm);
}

/*
 * iterates through a list of devices.  It is safe to remove the
 * current 'rdev', but not 'tmp'.
 */
#define rdev_for_each_list(rdev, tmp, head) \
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the list of disks in an array.
 */
#define rdev_for_each(rdev, tmp, mddev) \
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev) \
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0

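/*
 * Wait for a condition while holding a spinlock with interrupts disabled:
 * the lock is dropped around 'cmd' and schedule(), and re-acquired before
 * the condition is re-checked.  Callers of wait_event_lock_irq() must hold
 * the lock on entry and will hold it again on return.
 */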
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
	wait_queue_t __wait; \
	init_waitqueue_entry(&__wait, current); \
 \
	add_wait_queue(&wq, &__wait); \
	for (;;) { \
		set_current_state(TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	current->state = TASK_RUNNING; \
	remove_wait_queue(&wq, &__wait); \
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t *md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern int md_rdev_init(mdk_rdev_t *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);
#endif