#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_state.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)          __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif
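
/* These annotations are only interpreted when the file is run through
 * sparse (__CHECKER__): they let the checker verify that the named lock is
 * held around accesses to annotated members.  In a regular build they
 * expand to nothing. */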

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];

#define DRBD_SIG SIGXCPU

#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_conf;
struct drbd_tconn;

#define DEV (disk_to_dev(mdev->vdisk))

#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
	printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
#define conn_alert(TCONN, FMT, ARGS...)  conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
#define conn_crit(TCONN, FMT, ARGS...)   conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
#define conn_err(TCONN, FMT, ARGS...)    conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
#define conn_warn(TCONN, FMT, ARGS...)   conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
#define conn_info(TCONN, FMT, ARGS...)   conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
#define conn_dbg(TCONN, FMT, ARGS...)    conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)

#define D_ASSERT(exp)	do { \
	if (!(exp)) \
		dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			dev_err(DEV, "ASSERTION %s FAILED in %s\n",		\
				#exp, __func__);				\
		_bool;								\
		})
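
/* Unlike D_ASSERT(), expect() evaluates to the tested condition, so it can
 * be used inline:  if (!expect(x)) return;  -- a failed expectation is only
 * logged, never fatal. */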

enum {
	DRBD_FAULT_MD_WR = 0,
	DRBD_FAULT_MD_RD = 1,
	DRBD_FAULT_RS_WR = 2,
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,
	DRBD_FAULT_BM_ALLOC = 7,
	DRBD_FAULT_AL_EE = 8,
	DRBD_FAULT_RECEIVE = 9,

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(mdev, type);
#else
	return 0;
#endif
}
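
/* Typical call site (sketch): fail a bio on purpose when the matching fault
 * type is enabled via the fault_rate/enable_faults module parameters, e.g.
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);
 *	else
 *		submit_bio(rw, bio);
 */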

#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr minors;
extern struct list_head drbd_tconns;

enum drbd_packet {
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01,
	P_RS_DATA_REPLY	      = 0x02,
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07,
	P_DATA_REQUEST	      = 0x08,
	P_RS_DATA_REQUEST     = 0x09,
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ       = 0x12,

	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15,
	P_WRITE_ACK	      = 0x16,
	P_RS_WRITE_ACK	      = 0x17,
	P_SUPERSEDED	      = 0x18,
	P_NEG_ACK	      = 0x19,
	P_NEG_DREPLY	      = 0x1a,
	P_NEG_RS_DREPLY	      = 0x1b,
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	P_OV_REQUEST	      = 0x1e,
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20,
	P_CSUM_RS_REQUEST     = 0x21,
	P_RS_IS_IN_SYNC	      = 0x22,
	P_SYNC_PARAM89	      = 0x23,
	P_COMPRESSED_BITMAP   = 0x24,

	P_DELAY_PROBE	      = 0x27,
	P_OUT_OF_SYNC	      = 0x28,
	P_RS_CANCEL	      = 0x29,
	P_CONN_ST_CHG_REQ     = 0x2a,
	P_CONN_ST_CHG_REPLY   = 0x2b,
	P_RETRY_WRITE	      = 0x2c,
	P_PROTOCOL_UPDATE     = 0x2d,

	P_MAY_IGNORE	      = 0x100,
	P_MAX_OPT_CMD	      = 0x101,

	P_INITIAL_META	      = 0xfff1,
	P_INITIAL_DATA	      = 0xfff2,

	P_CONNECTION_FEATURES = 0xfffe
};
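
/* Roughly: 0x00..0x12 travel on the data socket and are handled by the
 * receiver, 0x13..0x1d on the meta socket (asender), and the commands added
 * later no longer follow that split.  P_MAY_IGNORE marks the start of a
 * range a peer is allowed to silently skip; the 0xfffx values are used only
 * during the initial handshake. */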

extern const char *cmdname(enum drbd_packet cmd);

struct bm_xfer_ctx {
	unsigned long bm_bits;
	unsigned long bm_words;

	unsigned long bit_offset;
	unsigned long word_offset;

	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
			       const char *direction, struct bm_xfer_ctx *c);
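
/* bm_xfer_ctx tracks progress while the bitmap is streamed in several
 * packets; the helper below keeps word_offset consistent with bit_offset.
 * On 32 bit, the lowest word bit is masked off so that word_offset always
 * points at a 64-bit-aligned position: the wire format is defined in terms
 * of 64 bit little endian words regardless of the host's word size. */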
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

#ifndef __packed
#define __packed __attribute__((packed))
#endif

struct p_header80 {
	u32	  magic;
	u16	  command;
	u16	  length;
} __packed;

struct p_header95 {
	u16	  magic;
	u16	  command;
	u32	  length;
} __packed;

struct p_header100 {
	u32	  magic;
	u16	  volume;
	u16	  command;
	u32	  length;
	u32	  pad;
} __packed;
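
/* Three generations of on-the-wire packet headers: p_header80 is the
 * original layout, p_header95 (16 bit magic, 32 bit length) is used from
 * agreed protocol 95 on, and p_header100 adds a volume number for
 * multi-volume resources (protocol 100+).  All fields are sent in network
 * byte order. */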

extern unsigned int drbd_header_size(struct drbd_tconn *tconn);

#define DP_HARDBARRIER	      1
#define DP_RW_SYNC	      2
#define DP_MAY_SET_IN_SYNC    4
#define DP_UNPLUG             8
#define DP_FUA               16
#define DP_FLUSH             32
#define DP_DISCARD           64
#define DP_SEND_RECEIVE_ACK 128
#define DP_SEND_WRITE_ACK   256
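
/* dp_flags in p_data: the low bits mirror the submitting bio's REQ_* flags
 * (sync/unplug/FUA/flush/discard) so the peer can resubmit the write with
 * equivalent semantics, while DP_SEND_RECEIVE_ACK and DP_SEND_WRITE_ACK
 * tell the peer to answer with P_RECV_ACK (protocol B) or P_WRITE_ACK
 * (protocol C).  DP_HARDBARRIER is deprecated. */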

struct p_data {
	u64	    sector;
	u64	    block_id;
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

struct p_block_ack {
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;

struct p_block_req {
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;
} __packed;

struct p_connection_features {
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	u32 _pad;
	u64 reserved[7];
} __packed;

struct p_barrier {
	u32 barrier;
	u32 pad;
} __packed;

struct p_barrier_ack {
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	u32 resync_rate;

	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	u32 resync_rate;

	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

struct p_rs_param_95 {
	u32 resync_rate;
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
	u32 c_max_rate;
} __packed;
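
/* The c_* members carry the settings of the dynamic resync-rate controller
 * (plan-ahead time, in-flight delay target, fill target, hard rate cap);
 * the matching runtime state lives in mdev->rs_plan_s (a fifo_buffer, see
 * below) and the rs_sect_in/rs_in_flight counters. */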

enum drbd_conn_flags {
	CF_DISCARD_MY_DATA = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	char integrity_alg[0];

} __packed;

struct p_uuids {
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	u64 uuid;
} __packed;

struct p_sizes {
	u64	    d_size;
	u64	    u_size;
	u64	    c_size;
	u32	    max_bio_size;
	u16	    queue_order_type;
	u16	    dds_flags;
} __packed;

struct p_state {
	u32	    state;
} __packed;

struct p_req_state {
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_block_desc {
	u64 sector;
	u32 blksize;
	u32 pad;
} __packed;

enum drbd_bitmap_code {
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	u8 encoding;

	u8 code[0];
} __packed;

struct p_delay_probe93 {
	u32 seq_num;
	u32 offset;
} __packed;

#define DRBD_SOCKET_BUFFER_SIZE 4096

enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_tconn *tconn;
	int reset_cpu_mask;
	char name[9];
};
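
/* get_t_state() reads t_state without taking t_lock: its callers tolerate a
 * momentarily stale value, and the smp_rmb() merely keeps the read from
 * being satisfied ahead of earlier reads on weakly ordered machines. */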
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
	union {
		struct drbd_conf *mdev;
		struct drbd_tconn *tconn;
	};
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);

struct drbd_request {
	struct drbd_work w;

	struct bio *private_bio;

	struct drbd_interval i;

	unsigned int epoch;

	struct list_head tl_requests;
	struct bio *master_bio;
	unsigned long start_time;

	atomic_t completion_ref;

	struct kref kref;

	unsigned rq_state;
};
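
/* Lifecycle of a drbd_request: master_bio is the bio received from the
 * upper layers, private_bio the clone submitted to the local disk.  The
 * interval i is what conflict detection is based on, epoch ties the write
 * to its barrier/epoch, tl_requests links it into the transfer log, and
 * completion_ref/kref keep the object alive until both local completion and
 * the peer's acks have been processed.  rq_state holds the RQ_* bits
 * defined in drbd_req.h. */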

struct drbd_epoch {
	struct drbd_tconn *tconn;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;
	atomic_t active;
	unsigned long flags;
};

enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32,
};

struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_epoch *epoch;
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;

	unsigned long flags;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	__EE_RESUBMITTED,

	__EE_WAS_ERROR,

	__EE_HAS_DIGEST,

	__EE_RESTART_REQUESTS,

	__EE_SEND_WRITE_ACK,

	__EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)

enum {
	UNPLUG_REMOTE,
	MD_DIRTY,
	USE_DEGR_WFC_T,
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,
	CONSIDER_RESYNC,
	MD_NO_FUA,
	SUSPEND_IO,
	BITMAP_IO,
	BITMAP_IO_QUEUED,
	GO_DISKLESS,
	WAS_IO_ERROR,
	WAS_READ_ERROR,
	FORCE_DETACH,
	RESYNC_AFTER_NEG,
	RESIZE_PENDING,
	NEW_CUR_UUID,
	AL_SUSPENDED,
	AHEAD_TO_SYNC_SOURCE,
	B_RS_H_DONE,
	DISCARD_MY_DATA,
	READ_BALANCE_RR,
};

struct drbd_bitmap;
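
/* Bitmap locking: while some bitmap I/O is pending, the bitmap is "locked"
 * against a chosen subset of modifications.  BM_DONT_CLEAR, BM_DONT_SET and
 * BM_DONT_TEST each forbid one class of access (violations are caught at
 * runtime), BM_IS_LOCKED marks the lock as held, and the BM_LOCKED_*
 * combinations express the usual policies. */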
enum bm_flag {
	BM_P_VMALLOCED = 0x10000,

	BM_LOCKED_MASK = 0xf,

	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	BM_IS_LOCKED = 0x8,

	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;

	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;

	u64 la_size_sect;
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;
	s32 bm_offset;

	s32 meta_dev_idx;

	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k;
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf;
	sector_t known_size;
};

struct drbd_md_io {
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total;
	int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
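
/* fifo_buffer is a small ring of ints used by the resync-rate controller to
 * remember what it planned in each of the last "plan ahead" steps; total is
 * the running sum of values[], and values[] itself is an old-style
 * zero-length (flexible) array sized at fifo_alloc() time. */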

enum {
	NET_CONGESTED,
	RESOLVE_CONFLICTS,
	SEND_PING,
	SIGNAL_ASENDER,
	GOT_PING_ACK,
	CONN_WD_ST_CHG_REQ,
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,
	CREATE_BARRIER,
	STATE_SENT,
	CALLBACK_PENDING,
	DISCONNECT_SENT,
};
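
/* drbd_tconn represents one replication link ("connection", one per
 * resource): it owns the two TCP sockets (data and meta), the long-running
 * receiver/worker/asender threads and the transfer log, and holds all
 * volumes of the resource in the "volumes" idr, keyed by volume number. */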
struct drbd_tconn {
	char *name;
	struct list_head all_tconn;
	struct kref kref;
	struct idr volumes;
	enum drbd_conns cstate;
	unsigned susp:1;
	unsigned susp_nod:1;
	unsigned susp_fen:1;
	struct mutex cstate_mutex;

	unsigned long flags;
	struct net_conf *net_conf;
	struct mutex conf_update;
	wait_queue_head_t ping_wait;
	struct res_opts res_opts;

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;
	struct drbd_socket meta;
	int agreed_pro_version;
	unsigned long last_received;
	unsigned int ko_count;

	spinlock_t req_lock;

	struct list_head transfer_log;

	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	struct crypto_hash *peer_integrity_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	atomic_t current_tle_nr;
	unsigned current_tle_writes;

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;
	cpumask_var_t cpu_mask;

	struct drbd_work_queue sender_work;

	struct {
		bool seen_any_write_yet;

		int current_epoch_nr;

		unsigned current_epoch_writes;
	} send;
};

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
};

struct drbd_conf {
	struct drbd_tconn *tconn;
	int vnr;
	struct kref kref;

	unsigned long flags;

	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work,
			 unplug_work,
			 go_diskless,
			 md_sync_work,
			 start_resync_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;
#ifdef DRBD_DEBUG_MD_SYNC
	struct {
		unsigned int line;
		const char* func;
	} last_md_mark_dirty;
#endif

	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;
	atomic_t ap_pending_cnt;
	atomic_t rs_pending_cnt;
	atomic_t unacked_cnt;
	atomic_t local_cnt;

	struct rb_root read_requests;
	struct rb_root write_requests;

	unsigned long rs_total;
	unsigned long rs_failed;
	unsigned long rs_start;
	unsigned long rs_paused;
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	int rs_last_mark;
	unsigned long rs_last_bcast;

	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	sector_t ov_position;
	sector_t ov_last_oos_start;
	sector_t ov_last_oos_size;
	unsigned long ov_left;

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo;

	struct lru_cache *resync;
	unsigned int resync_locked;
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;
	struct list_head sync_ee;
	struct list_head done_ee;
	struct list_head read_ee;
	struct list_head net_ee;

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;
	atomic_t pp_in_use_by_net;
	wait_queue_head_t ee_wait;
	struct page *md_io_page;
	struct drbd_md_io md_io;
	atomic_t md_io_in_use;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set;
	struct bm_io_work bm_io_work;
	u64 ed_uuid;
	struct mutex own_state_mutex;
	struct mutex *state_mutex;
	char congestion_reason;
	atomic_t rs_sect_in;
	atomic_t rs_sect_ev;
	int rs_last_sect_ev;
	int rs_last_events;
	int c_sync_rate;
	struct fifo_buffer *rs_plan_s;
	int rs_in_flight;
	atomic_t ap_in_flight;
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	struct submit_worker submit;
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	return (struct drbd_conf *)idr_find(&minors, minor);
}

static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}

static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
{
	return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
}

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2,
};

extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_tconn *);
extern void drbd_free_sock(struct drbd_tconn *tconn);
extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_tconn *tconn);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
extern int drbd_send_current_state(struct drbd_conf *mdev);
extern int drbd_send_sync_param(struct drbd_conf *mdev);
extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);

extern void conn_md_sync(struct drbd_tconn *tconn);
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
#else
#define drbd_md_mark_dirty(m)	drbd_md_mark_dirty_(m, __LINE__ , __func__ )
extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
		unsigned int line, const char *func);
#endif
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
				 int (*io_fn)(struct drbd_conf *),
				 void (*done)(struct drbd_conf *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);

#define MD_128MB_SECT (128LLU << 11)
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
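
/* One activity-log extent covers 1<<22 bytes = 4 MiB of the backing device.
 * MD_128MB_SECT expresses the fixed 128 MiB meta data area in 512-byte
 * sectors (128 << 11 = 262144); MD_4kB_SECT and MD_32kB_SECT are likewise
 * 4 KiB and 32 KiB expressed in sectors. */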

#define AL_UPDATES_PER_TRANSACTION 64
#define AL_CONTEXT_PER_TRANSACTION 919

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

struct bm_extent {
	int rs_left;
	int rs_failed;
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0
#define BME_LOCKED     1
#define BME_PRIORITY   2

#define SLEEP_TIME (HZ/10)

#define BM_BLOCK_SHIFT	12
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)

#define BM_EXT_SHIFT	 24
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))

#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)

#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
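
/* Worked out: one bitmap bit covers BM_BLOCK_SIZE = 4 KiB = 8 sectors
 * (BM_SECT_PER_BIT), one bitmap extent covers BM_EXT_SIZE = 16 MiB = 32768
 * sectors, so a bitmap extent groups 4096 bits and spans four activity-log
 * extents (AL_EXT_PER_BM_SECT = 1 << (24 - 22) = 4). */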

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

#define DRBD_MAX_SECTORS_FIXED_BM \
	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
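/* With the fixed 128 MiB meta data area, 262144 - 64 - 8 = 262072 sectors
 * remain for the bitmap, and each 512-byte bitmap sector (4096 bits, 8
 * sectors per bit) covers 32768 data sectors: 262072 * 32768 sectors,
 * i.e. roughly 4 TiB of usable device size. */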
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
#if BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
#endif
#endif

#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17)

extern int  drbd_bm_init(struct drbd_conf *mdev);
extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);

extern int  drbd_bm_set_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_conf *mdev, const unsigned long s, const unsigned long e);

extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);

extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
extern int drbd_bm_rs_done(struct drbd_conf *mdev);

extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_conf *mdev);

/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;
extern struct kmem_cache *drbd_bm_ext_cache;
extern struct kmem_cache *drbd_al_ext_cache;
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

#define DRBD_MIN_POOL_PAGES	128
extern mempool_t *drbd_md_io_page_pool;

extern struct bio_set *drbd_md_io_bio_set;

extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern rwlock_t global_state_lock;

extern int conn_lowest_minor(struct drbd_tconn *tconn);
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
extern void drbd_minor_destroy(struct kref *kref);

extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
extern void conn_destroy(struct kref *kref);
struct drbd_tconn *conn_get_by_name(const char *name);
extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
					    void *peer_addr, int peer_addr_len);
extern void conn_free_crypto(struct drbd_tconn *tconn);

extern int proc_details;

/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

/* drbd_nl.c */
extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);

/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
void drbd_resync_after_changed(struct drbd_conf *mdev);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);

extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
		struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);

static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
{
	if (mdev->ov_last_oos_size) {
		dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
		     (unsigned long long)mdev->ov_last_oos_start,
		     (unsigned long)mdev->ov_last_oos_size);
	}
	mdev->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
			 struct drbd_peer_request *, void *);

extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);

/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
extern int drbd_submit_peer_request(struct drbd_conf *,
				    struct drbd_peer_request *, const unsigned,
				    const int);
extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
						     sector_t, unsigned int,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void conn_flush_workqueue(struct drbd_tconn *tconn);
extern int drbd_connected(struct drbd_conf *mdev);
static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	conn_flush_workqueue(mdev->tconn);
}

static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
				  char *optval, int optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *)optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);
	return err;
}

static inline void drbd_tcp_cork(struct socket *sock)
{
	int val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int val = 0;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int val = 2;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char*)&val, sizeof(val));
}

void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_shrink(struct drbd_conf *mdev);

/* drbd_nl.c */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
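
/* The pages of a peer request form a single-linked chain threaded through
 * page->private (set up by drbd_alloc_pages()); the helpers below walk that
 * chain.  A page with page_count() > 1 is presumably still referenced by
 * the network layer (zero-copy sendpage), so such a request must stay on
 * net_ee until the pages quiesce. */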
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline enum drbd_state_rv
_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		enum chg_state_flags flags, struct completion *done)
{
	enum drbd_state_rv rv;

	read_lock(&global_state_lock);
	rv = __drbd_set_state(mdev, ns, flags, done);
	read_unlock(&global_state_lock);

	return rv;
}

static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
{
	union drbd_state rv;

	rv.i = mdev->state.i;
	rv.susp = mdev->tconn->susp;
	rv.susp_nod = mdev->tconn->susp_nod;
	rv.susp_fen = mdev->tconn->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON:
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
			if (mdev->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &mdev->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &mdev->flags);
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		__drbd_chk_io_error_(mdev, forcedetach, where);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	}
}

static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void wake_asender(struct drbd_tconn *tconn)
{
	if (test_bit(SIGNAL_ASENDER, &tconn->flags))
		force_sig(DRBD_SIG, tconn->asender.task);
}

static inline void request_ping(struct drbd_tconn *tconn)
{
	set_bit(SEND_PING, &tconn->flags);
	wake_asender(tconn);
}

extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_tconn *tconn);
extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&mdev->which) < 0)				\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&mdev->which))

#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
{
	if (atomic_dec_and_test(&mdev->ap_pending_cnt))
		wake_up(&mdev->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}

#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
{
	atomic_dec(&mdev->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}

#define dec_unacked(mdev) _dec_unacked(mdev, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
{
	atomic_dec(&mdev->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
{
	atomic_sub(n, &mdev->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
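
/* Usage sketch for the local-disk reference count: get_ldev() takes a
 * reference on mdev->local_cnt and succeeds only while the disk state is at
 * least D_INCONSISTENT; every successful get must be paired with put_ldev().
 *
 *	if (get_ldev(mdev)) {
 *		... safe to dereference mdev->ldev here ...
 *		put_ldev(mdev);
 *	}
 *
 * The __cond_lock() wrapping only exists so sparse can track the implied
 * "local" reference like a lock. */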

static inline void put_ldev(struct drbd_conf *mdev)
{
	int i = atomic_dec_return(&mdev->local_cnt);

	__release(local);
	D_ASSERT(i >= 0);
	if (i == 0) {
		if (mdev->state.disk == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_ldev_destroy(mdev);
		if (mdev->state.disk == D_FAILED) {
			/* all application IO references gone */
			if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
				drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
		}
		wake_up(&mdev->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (mdev->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
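
/* Resync progress is reported in per mille.  To stay within 32 bit
 * arithmetic, the helper below scales numerator and denominator down by
 * 2^10 (2^16 for devices with more than UINT_MAX bits) before dividing,
 * trading a little precision for not needing a 64 bit division. */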
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	typecheck(unsigned long, mdev->rs_total);

	if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
		*bits_left = mdev->ov_left;
	else
		*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

	if (*bits_left > mdev->rs_total) {
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
		unsigned long left = *bits_left >> shift;
		unsigned long total = 1UL + (mdev->rs_total >> shift);
		unsigned long tmp = 1000UL - left * 1000UL/total;
		*per_mil_done = tmp;
	}
}

static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
	union drbd_dev_state s = mdev->state;

	switch ((enum drbd_conns)s.conn) {
	case C_STANDALONE:
	case C_WF_CONNECTION:
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	case C_WF_BITMAP_S:
		if (mdev->tconn->agreed_pro_version < 96)
			return 0;
		break;

	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		break;

	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;

	return tconn->susp || tconn->susp_fen || tconn->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);

	if (drbd_suspended(mdev))
		return false;
	if (test_bit(SUSPEND_IO, &mdev->flags))
		return false;

	if (!drbd_state_is_stable(mdev))
		return false;

	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &mdev->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
{
	bool rv = false;

	spin_lock_irq(&mdev->tconn->req_lock);
	rv = may_inc_ap_bio(mdev);
	if (rv)
		atomic_inc(&mdev->ap_bio_cnt);
	spin_unlock_irq(&mdev->tconn->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
}

static inline void dec_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}

	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
{
	return mdev->tconn->agreed_pro_version >= 97 &&
		mdev->tconn->agreed_pro_version != 100;
}
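
/* The online-verify stop sector is understood from agreed protocol 97 on,
 * with protocol 100 explicitly excluded -- presumably because the release
 * speaking exactly 100 shipped without the feature. */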

static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
	int changed = mdev->ed_uuid != val;
	mdev->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (mdev->ldev == NULL) {
		dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
		return;
	}

	if (test_bit(MD_NO_FUA, &mdev->flags))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
	if (r) {
		set_bit(MD_NO_FUA, &mdev->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}

#endif