1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _MD_MD_H
16#define _MD_MD_H
17
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
26
/* Sentinel sector value meaning "beyond any real device": all bits set. */
#define MaxSector (~(sector_t)0)

/*
 * Capacity of the per-rdev bad-block table: one u64 entry per 8 bytes of
 * the single page used to store the (sorted) list.
 */
#define MD_MAX_BADBLOCKS (PAGE_SIZE/8)
35
36
37
38
/*
 * MD's per-component-device descriptor: one per member disk of an array.
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set,
					 * linked on mddev->disks */

	sector_t sectors;		/* device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;	/* superblock / bad-block list pages */
	int sb_loaded;			/* non-zero once sb_page holds valid data */
	__u64 sb_events;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* relevant while reshaping */
	sector_t sb_start;		/* offset of the super block
					 * (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    Faulty==1 In_sync==0
	 * Fully working:  Faulty==0 In_sync==1
	 * Working, but not in sync with array:
	 *                 Faulty==0 In_sync==0
	 * It can never have Faulty==1, In_sync==1.
	 */
	unsigned long flags;		/* bit set of 'enum flag_bits' bits */
	wait_queue_head_t blocked_wait;	/* woken when Blocked/BlockedBadBlocks
					 * style waits should be re-checked */

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have
					 * in the array after a
					 * level-change completes. */
	int saved_raid_disk;		/* role that device used to have in
					 * the array and could again if we
					 * did a partial resync from the
					 * bitmap */
	sector_t recovery_offset;	/* if this device has been partially
					 * recovered, this is where we were
					 * up to. */

	atomic_t nr_pending;		/* number of pending requests;
					 * only maintained for arrays that
					 * support hot removal */

	atomic_t read_errors;		/* number of consecutive read errors
					 * that we have tried to ignore */

	struct timespec last_read_error;	/* time of our last read error */

	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and
					 * storing in the superblock */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state;	/* handle for 'state' sysfs entry */

	/* Per-device bad-block list, stored sorted in a single page
	 * (see the BB_* macros below for the entry encoding). */
	struct badblocks {
		int count;		/* count of bad blocks */
		int unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none. */
		int shift;		/* shift from sectors to block size;
					 * a negative shift means badblocks
					 * are disabled */
		u64 *page;		/* badblock list */
		int changed;		/* list changed since last sync to disk */
		seqlock_t lock;		/* protects the list for lock-free reads */

		sector_t sector;	/* region being synced to disk */
		sector_t size;		/* in sectors */
	} badblocks;
};
/* Bit numbers used in md_rdev->flags. */
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Unmerged,		/* device is being added to array and should
				 * be considered for bvec_merge_fn but not
				 * yet for actual IO
				 */
	WriteMostly,		/* avoid reading from this device
				 * if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* an error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared
				 */
	WriteErrorSeen,		/* a write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* intermediate state for clearing
				 * Blocked.  The fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* a writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check, so setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* this device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because of
				 * explicit request.
				 */
	Replacement,		/* this device is a replacement for
				 * a want_replacement device with the
				 * same raid_disk number.
				 */
};
171
/* Bad-block entries are stored sorted in a single page, one u64 per
 * block or extent: 54 bits of sector number, 9 bits of extent length
 * (stored as length-1, so 1..512 sectors), and 1 'acknowledged' bit.
 */
#define BB_LEN_MASK (0x00000000000001FFULL)
#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK (0x8000000000000000ULL)
#define BB_MAX_LEN 512
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
180
181extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
182 sector_t *first_bad, int *bad_sectors);
183static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
184 sector_t *first_bad, int *bad_sectors)
185{
186 if (unlikely(rdev->badblocks.count)) {
187 int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
188 sectors,
189 first_bad, bad_sectors);
190 if (rv)
191 *first_bad -= rdev->data_offset;
192 return rv;
193 }
194 return 0;
195}
196extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
197 int is_new);
198extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
199 int is_new);
200extern void md_ack_all_badblocks(struct badblocks *bb);
201
/*
 * Per-array state for one md device (/dev/mdX).
 */
struct mddev {
	void *private;			/* personality-specific data */
	struct md_personality *pers;	/* active personality, NULL if not running */
	dev_t unit;
	int md_minor;
	struct list_head disks;		/* member md_rdevs, linked via same_set */
	unsigned long flags;		/* MD_CHANGE_* / MD_ARRAY_* bits below */
#define MD_CHANGE_DEVS 0	/* some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3	/* first use of array, needs initialization */

	int suspended;			/* non-zero while array IO is suspended */
	atomic_t active_io;		/* in-flight requests; drained on suspend */
	int ro;				/* non-zero for a read-only array
					 * (NOTE(review): exact value semantics
					 * live in md.c -- confirm there) */
	int sysfs_active;		/* set when sysfs deletes
					 * are happening, so new additions
					 * should be prevented
					 */
	int ready;			/* see when it is safe to pass
					 * IO requests down */
	struct gendisk *gendisk;

	struct kobject kobj;
	int hold_active;		/* keep object active without any opens */
#define UNTIL_IOCTL 1
#define UNTIL_STOP 2

	/* superblock / metadata description */
	int major_version,
	    minor_version,
	    patch_version;
	int persistent;			/* array has a persistent superblock */
	int external;			/* metadata is managed externally */
	char metadata_type[17];		/* externally set */
	int chunk_sectors;
	time_t ctime, utime;
	int level, layout;
	char clevel[16];
	int raid_disks;
	int max_disks;
	sector_t dev_sectors;		/* used size of component devices */

	sector_t array_sectors;		/* exported array size */
	int external_size;		/* size managed externally */

	__u64 events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int can_decrease_events;

	char uuid[16];

	/* If the array is being reshaped, we need to record the new shape
	 * and an indication of where we are up to.  This is written to the
	 * superblock.  If reshape_position is MaxSector, then no reshape
	 * is happening (yet).
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;
	int new_chunk_sectors;
	int reshape_backwards;

	struct md_thread *thread;	/* management thread */
	struct md_thread *sync_thread;	/* doing resync or reconstruct */
	sector_t curr_resync;		/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily
	 * track how much resync has been completed.  So we occasionally
	 * pause until everything completes, then set curr_resync_completed
	 * to curr_resync.  It may be well behind the real resync mark, but
	 * it is a value we are certain of.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent mark */
	sector_t resync_mark_cnt;	/* blocks written at resync_mark */
	sector_t curr_mark_cnt;		/* blocks scheduled now */

	sector_t resync_max_sectors;	/* may be set by personality */

	atomic64_t resync_mismatches;	/* count of sectors where
					 * parity/replica mismatch found */

	/* allow user-space to request suspension of IO to regions
	 * of the array */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* resync speed limits; if zero, use the system-wide defaults */
	int sync_speed_min;
	int sync_speed_max;

	/* resync even though the same disks are shared among md devices */
	int parallel_resync;

	int ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:    we might need to start a resync/recover
	 * RUNNING:   a thread is running, or about to be started
	 * SYNC:      actually doing a resync, not a recovery
	 * RECOVER:   doing recovery, or need to try it
	 * INTR:      resync needs to be aborted for some reason
	 * DONE:      thread is done and is waiting to be reaped
	 * REQUESTED: user-space has requested a sync (used with SYNC)
	 * CHECK:     user-space request for check-only, no repair
	 * RESHAPE:   a reshape is happening
	 * FROZEN:    sync/recovery is frozen
	 * ERROR:     sync-action interrupted because it lost a device
	 *
	 * If neither SYNC nor RESHAPE are set, then it is a recovery.
	 */
#define MD_RECOVERY_RUNNING 0
#define MD_RECOVERY_SYNC 1
#define MD_RECOVERY_RECOVER 2
#define MD_RECOVERY_INTR 3
#define MD_RECOVERY_DONE 4
#define MD_RECOVERY_NEEDED 5
#define MD_RECOVERY_REQUESTED 6
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
#define MD_RECOVERY_FROZEN 9
#define MD_RECOVERY_ERROR 10

	unsigned long recovery;
	/* If a device is marked Faulty during recovery, recovery is
	 * disabled by bumping this number; spares carrying a matching
	 * value will not be recovered onto.
	 */
	int recovery_disabled;

	int in_sync;			/* know to not need resync */
	/* 'open_mutex' avoids a race between 'md_open' and 'do_md_stop',
	 * so that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 */
	struct mutex open_mutex;
	struct mutex reconfig_mutex;
	atomic_t active;		/* general refcount */
	atomic_t openers;		/* number of active opens */

	int changed;			/* true if we might need to
					 * reread partition info */
	int degraded;			/* whether md should consider
					 * adding a spare */

	int merge_check_needed;		/* at least one member device
					 * has a merge_bvec_fn */

	atomic_t recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t recovery_wait;
	sector_t recovery_cp;		/* resync checkpoint */
	sector_t resync_min;		/* user-requested sync
					 * starts here */
	sector_t resync_max;		/* resync should pause
					 * when it gets here */

	struct sysfs_dirent *sysfs_state;	/* handle for 'array_state'
						 * file in sysfs */
	struct sysfs_dirent *sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t write_lock;
	wait_queue_head_t sb_wait;	/* wait for superblock updates */
	atomic_t pending_writes;	/* number of active superblock writes */

	unsigned int safemode;		/* if set, update "clean" superblock
					 * when no writes pending */
	unsigned int safemode_delay;
	struct timer_list safemode_timer;
	atomic_t writes_pending;
	struct request_queue *queue;	/* block-layer queue for the array */

	struct bitmap *bitmap;		/* write-intent bitmap for the array */
	struct {
		struct file *file;	/* the bitmap file */
		loff_t offset;		/* offset from superblock of
					 * start of bitmap.  May be
					 * negative, but not '0'.
					 * For external metadata, offset
					 * from start of device.
					 */
		unsigned long space;	/* space available at this offset */
		loff_t default_offset;	/* this is the offset to use when
					 * hot-adding a bitmap */
		unsigned long default_space;	/* space available at
						 * default offset */
		struct mutex mutex;
		unsigned long chunksize;
		unsigned long daemon_sleep;	/* jiffies between updates */
		unsigned long max_write_behind;	/* write-behind mode limit */
		int external;		/* bitmap is managed externally */
	} bitmap_info;

	atomic_t max_corr_read_errors;	/* max read retries */
	struct list_head all_mddevs;	/* global list of all md devices */

	struct attribute_group *to_remove;	/* sysfs group pending removal */

	struct bio_set *bio_set;

	/* Generic flush handling: flush_bio/flush_pending track an
	 * in-progress FLUSH, completed via flush_work.
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
426
427
428static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
429{
430 int faulty = test_bit(Faulty, &rdev->flags);
431 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
432 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
433}
434
435static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
436{
437 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
438}
439
/*
 * An md "personality" implements one RAID level/algorithm.  The md core
 * dispatches all array operations through this table of callbacks.
 */
struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	int (*stop)(struct mddev *mddev);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states:
	 *  0 - fully active
	 *  1 - no new requests allowed
	 *  others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one personality
	 * to another.  The new personality must be able to handle the
	 * data in the current layout.  Returns the new private data on
	 * success, or an ERR_PTR-style value on failure
	 * (NOTE(review): exact return contract defined by callers in
	 * md.c -- confirm there).
	 */
	void *(*takeover) (struct mddev *mddev);
};
480
481
/* sysfs attribute wrapper: show/store operate on the owning mddev. */
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
487extern struct attribute_group md_bitmap_group;
488
489static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
490{
491 if (sd)
492 return sysfs_get_dirent(sd, NULL, name);
493 return sd;
494}
/* NULL-tolerant wrapper around sysfs_notify_dirent(). */
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (!sd)
		return;
	sysfs_notify_dirent(sd);
}
500
501static inline char * mdname (struct mddev * mddev)
502{
503 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
504}
505
506static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
507{
508 char nm[20];
509 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
510 sprintf(nm, "rd%d", rdev->raid_disk);
511 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
512 } else
513 return 0;
514}
515
516static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
517{
518 char nm[20];
519 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
520 sprintf(nm, "rd%d", rdev->raid_disk);
521 sysfs_remove_link(&mddev->kobj, nm);
522 }
523}
524
525
526
527
528
/*
 * Iterate rdevs on an explicit list head.  Safe against removal of the
 * current 'rdev'; do not touch 'tmp'.
 */
#define rdev_for_each_list(rdev, tmp, head) \
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * Iterate through the member devices of an array.  Not safe against
 * removal of the current entry.
 */
#define rdev_for_each(rdev, mddev) \
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

/* As rdev_for_each(), but safe against removal of the current entry. */
#define rdev_for_each_safe(rdev, tmp, mddev) \
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

/* RCU-protected variant; caller must hold rcu_read_lock(). */
#define rdev_for_each_rcu(rdev, mddev) \
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
543
/*
 * Per-array service thread.  'run' is invoked from the kthread loop;
 * the thread sleeps on 'wqueue' until THREAD_WAKEUP is set in 'flags'
 * or 'timeout' (jiffies) expires.
 */
struct md_thread {
	void (*run) (struct md_thread *thread);
	struct mddev *mddev;
	wait_queue_head_t wqueue;
	unsigned long flags;
	struct task_struct *tsk;
	unsigned long timeout;
	void *private;		/* owner-defined context */
};
553
554#define THREAD_WAKEUP 0
555
/* Drop a page reference, tolerating a NULL pointer. */
static inline void safe_put_page(struct page *p)
{
	if (!p)
		return;
	put_page(p);
}
560
561extern int register_md_personality(struct md_personality *p);
562extern int unregister_md_personality(struct md_personality *p);
563extern struct md_thread *md_register_thread(
564 void (*run)(struct md_thread *thread),
565 struct mddev *mddev,
566 const char *name);
567extern void md_unregister_thread(struct md_thread **threadp);
568extern void md_wakeup_thread(struct md_thread *thread);
569extern void md_check_recovery(struct mddev *mddev);
570extern void md_reap_sync_thread(struct mddev *mddev);
571extern void md_write_start(struct mddev *mddev, struct bio *bi);
572extern void md_write_end(struct mddev *mddev);
573extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
574extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
575extern void md_finish_reshape(struct mddev *mddev);
576
577extern int mddev_congested(struct mddev *mddev, int bits);
578extern void md_flush_request(struct mddev *mddev, struct bio *bio);
579extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
580 sector_t sector, int size, struct page *page);
581extern void md_super_wait(struct mddev *mddev);
582extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
583 struct page *page, int rw, bool metadata_op);
584extern void md_do_sync(struct md_thread *thread);
585extern void md_new_event(struct mddev *mddev);
586extern int md_allow_write(struct mddev *mddev);
587extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
588extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
589extern int md_check_no_bitmap(struct mddev *mddev);
590extern int md_integrity_register(struct mddev *mddev);
591extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
592extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
593extern void restore_bitmap_write_access(struct file *file);
594
595extern void mddev_init(struct mddev *mddev);
596extern int md_run(struct mddev *mddev);
597extern void md_stop(struct mddev *mddev);
598extern void md_stop_writes(struct mddev *mddev);
599extern int md_rdev_init(struct md_rdev *rdev);
600extern void md_rdev_clear(struct md_rdev *rdev);
601
602extern void mddev_suspend(struct mddev *mddev);
603extern void mddev_resume(struct mddev *mddev);
604extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
605 struct mddev *mddev);
606extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
607 struct mddev *mddev);
608extern void md_trim_bio(struct bio *bio, int offset, int size);
609
610extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
611static inline int mddev_check_plugged(struct mddev *mddev)
612{
613 return !!blk_check_plugged(md_unplug, mddev,
614 sizeof(struct blk_plug_cb));
615}
616#endif
617