1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#ifndef _DRBD_INT_H
27#define _DRBD_INT_H
28
29#include <linux/compiler.h>
30#include <linux/types.h>
31#include <linux/version.h>
32#include <linux/list.h>
33#include <linux/sched.h>
34#include <linux/bitops.h>
35#include <linux/slab.h>
36#include <linux/crypto.h>
37#include <linux/ratelimit.h>
38#include <linux/tcp.h>
39#include <linux/mutex.h>
40#include <linux/major.h>
41#include <linux/blkdev.h>
42#include <linux/genhd.h>
43#include <net/tcp.h>
44#include <linux/lru_cache.h>
45
46#ifdef __CHECKER__
47# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
48# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
49# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
50# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
51#else
52# define __protected_by(x)
53# define __protected_read_by(x)
54# define __protected_write_by(x)
55# define __must_hold(x)
56#endif
57
58#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
59
60
61extern unsigned int minor_count;
62extern int disable_sendpage;
63extern int allow_oos;
64extern unsigned int cn_idx;
65
66#ifdef CONFIG_DRBD_FAULT_INJECTION
67extern int enable_faults;
68extern int fault_rate;
69extern int fault_devs;
70#endif
71
72extern char usermode_helper[];
73
74
75#ifndef TRUE
76#define TRUE 1
77#endif
78#ifndef FALSE
79#define FALSE 0
80#endif
81
82
83
84
85
86
87#define DRBD_SIG SIGXCPU
88
89
90
91
92
93
94#define DRBD_SIGKILL SIGHUP
95
96
97
98
99
100
101#define ID_IN_SYNC (4711ULL)
102#define ID_OUT_OF_SYNC (4712ULL)
103
104#define ID_SYNCER (-1ULL)
105#define ID_VACANT 0
106#define is_syncer_block_id(id) ((id) == ID_SYNCER)
107
108struct drbd_conf;
109
110
111
112#define DEV (disk_to_dev(mdev->vdisk))
113
114#define D_ASSERT(exp) if (!(exp)) \
115 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
116
117#define ERR_IF(exp) if (({ \
118 int _b = (exp) != 0; \
119 if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
120 __func__, #exp, __FILE__, __LINE__); \
121 _b; \
122 }))
123
124
125enum {
126 DRBD_FAULT_MD_WR = 0,
127 DRBD_FAULT_MD_RD = 1,
128 DRBD_FAULT_RS_WR = 2,
129 DRBD_FAULT_RS_RD = 3,
130 DRBD_FAULT_DT_WR = 4,
131 DRBD_FAULT_DT_RD = 5,
132 DRBD_FAULT_DT_RA = 6,
133 DRBD_FAULT_BM_ALLOC = 7,
134 DRBD_FAULT_AL_EE = 8,
135 DRBD_FAULT_RECEIVE = 9,
136
137 DRBD_FAULT_MAX,
138};
139
140#ifdef CONFIG_DRBD_FAULT_INJECTION
141extern unsigned int
142_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
143static inline int
144drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
145 return fault_rate &&
146 (enable_faults & (1<<type)) &&
147 _drbd_insert_fault(mdev, type);
148}
149#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
150
151#else
152#define FAULT_ACTIVE(_m, _t) (0)
153#endif
154
155
156#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
157
158#define div_floor(A, B) ((A)/(B))
159
160
161
162#define DRBD_MD_MAGIC (DRBD_MAGIC+4)
163
164extern struct drbd_conf **minor_table;
165extern struct ratelimit_state drbd_ratelimit_state;
166
167
/* On-the-wire packet command codes.  The numeric values are part of the
 * network protocol and must never change; new commands may only be
 * appended.  Note the gaps (0x25, 0x26): those codes were used by
 * earlier/other versions and must stay unassigned here. */
enum drbd_packets {
	/* commands carried on the data socket */
	P_DATA = 0x00,
	P_DATA_REPLY = 0x01,
	P_RS_DATA_REPLY = 0x02,
	P_BARRIER = 0x03,
	P_BITMAP = 0x04,
	P_BECOME_SYNC_TARGET = 0x05,
	P_BECOME_SYNC_SOURCE = 0x06,
	P_UNPLUG_REMOTE = 0x07,
	P_DATA_REQUEST = 0x08,
	P_RS_DATA_REQUEST = 0x09,
	P_SYNC_PARAM = 0x0a,
	P_PROTOCOL = 0x0b,
	P_UUIDS = 0x0c,
	P_SIZES = 0x0d,
	P_STATE = 0x0e,
	P_SYNC_UUID = 0x0f,
	P_AUTH_CHALLENGE = 0x10,
	P_AUTH_RESPONSE = 0x11,
	P_STATE_CHG_REQ = 0x12,

	/* acknowledgements and ping traffic — presumably carried on the
	 * meta-data socket (asender); confirm against the receiver code */
	P_PING = 0x13,
	P_PING_ACK = 0x14,
	P_RECV_ACK = 0x15,
	P_WRITE_ACK = 0x16,
	P_RS_WRITE_ACK = 0x17,
	P_DISCARD_ACK = 0x18,
	P_NEG_ACK = 0x19,
	P_NEG_DREPLY = 0x1a,
	P_NEG_RS_DREPLY = 0x1b,
	P_BARRIER_ACK = 0x1c,
	P_STATE_CHG_REPLY = 0x1d,

	/* online verify and checksum based resync (protocol >= 89,
	 * per the P_SYNC_PARAM89 naming — TODO confirm) */
	P_OV_REQUEST = 0x1e,
	P_OV_REPLY = 0x1f,
	P_OV_RESULT = 0x20,
	P_CSUM_RS_REQUEST = 0x21,
	P_RS_IS_IN_SYNC = 0x22,
	P_SYNC_PARAM89 = 0x23,
	P_COMPRESSED_BITMAP = 0x24,

	/* 0x25 and 0x26 are intentionally unassigned */
	P_DELAY_PROBE = 0x27,

	P_MAX_CMD = 0x28,	/* first unused regular command code */
	P_MAY_IGNORE = 0x100,	/* commands >= this may be ignored if unknown */
	P_MAX_OPT_CMD = 0x101,

	/* special handshake command codes, outside the regular range;
	 * cmdname() handles them explicitly */
	P_HAND_SHAKE_M = 0xfff1,
	P_HAND_SHAKE_S = 0xfff2,

	P_HAND_SHAKE = 0xfffe
};
227
228static inline const char *cmdname(enum drbd_packets cmd)
229{
230
231
232
233 static const char *cmdnames[] = {
234 [P_DATA] = "Data",
235 [P_DATA_REPLY] = "DataReply",
236 [P_RS_DATA_REPLY] = "RSDataReply",
237 [P_BARRIER] = "Barrier",
238 [P_BITMAP] = "ReportBitMap",
239 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
240 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
241 [P_UNPLUG_REMOTE] = "UnplugRemote",
242 [P_DATA_REQUEST] = "DataRequest",
243 [P_RS_DATA_REQUEST] = "RSDataRequest",
244 [P_SYNC_PARAM] = "SyncParam",
245 [P_SYNC_PARAM89] = "SyncParam89",
246 [P_PROTOCOL] = "ReportProtocol",
247 [P_UUIDS] = "ReportUUIDs",
248 [P_SIZES] = "ReportSizes",
249 [P_STATE] = "ReportState",
250 [P_SYNC_UUID] = "ReportSyncUUID",
251 [P_AUTH_CHALLENGE] = "AuthChallenge",
252 [P_AUTH_RESPONSE] = "AuthResponse",
253 [P_PING] = "Ping",
254 [P_PING_ACK] = "PingAck",
255 [P_RECV_ACK] = "RecvAck",
256 [P_WRITE_ACK] = "WriteAck",
257 [P_RS_WRITE_ACK] = "RSWriteAck",
258 [P_DISCARD_ACK] = "DiscardAck",
259 [P_NEG_ACK] = "NegAck",
260 [P_NEG_DREPLY] = "NegDReply",
261 [P_NEG_RS_DREPLY] = "NegRSDReply",
262 [P_BARRIER_ACK] = "BarrierAck",
263 [P_STATE_CHG_REQ] = "StateChgRequest",
264 [P_STATE_CHG_REPLY] = "StateChgReply",
265 [P_OV_REQUEST] = "OVRequest",
266 [P_OV_REPLY] = "OVReply",
267 [P_OV_RESULT] = "OVResult",
268 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
269 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
270 [P_COMPRESSED_BITMAP] = "CBitmap",
271 [P_DELAY_PROBE] = "DelayProbe",
272 [P_MAX_CMD] = NULL,
273 };
274
275 if (cmd == P_HAND_SHAKE_M)
276 return "HandShakeM";
277 if (cmd == P_HAND_SHAKE_S)
278 return "HandShakeS";
279 if (cmd == P_HAND_SHAKE)
280 return "HandShake";
281 if (cmd >= P_MAX_CMD)
282 return "Unknown";
283 return cmdnames[cmd];
284}
285
286
287
/*
 * Context for transferring the dirty bitmap over the wire, used on both
 * the sending and the receiving side (see INFO_bm_xfer_stats()).
 */
struct bm_xfer_ctx {
	/* overall bitmap dimensions for this transfer */
	unsigned long bm_bits;
	unsigned long bm_words;

	/* current position within the bitmap; word_offset is derived from
	 * bit_offset via bm_xfer_ctx_bit_to_word_offset() */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* transfer statistics, two counters each — presumably one slot for
	 * plain and one for RLE-compressed packets; confirm against
	 * INFO_bm_xfer_stats() */
	unsigned packets[2];
	unsigned bytes[2];
};
303
304extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
305 const char *direction, struct bm_xfer_ctx *c);
306
307static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
308{
309
310
311
312
313
314
315
316#if BITS_PER_LONG == 64
317 c->word_offset = c->bit_offset >> 6;
318#elif BITS_PER_LONG == 32
319 c->word_offset = c->bit_offset >> 5;
320 c->word_offset &= ~(1UL);
321#else
322# error "unsupported BITS_PER_LONG"
323#endif
324}
325
326#ifndef __packed
327#define __packed __attribute__((packed))
328#endif
329
330
331
332
333
334
335
336
337
338
339
/* Fixed 8 byte packet header as used by protocol versions up to and
 * including 80 (hence the name).  The layout is on-the-wire ABI, so the
 * struct is __packed and must never change. */
struct p_header80 {
	u32 magic;	/* protocol magic (DRBD_MAGIC based — TODO confirm) */
	u16 command;	/* enum drbd_packets value */
	u16 length;	/* payload length; whether the header itself is
			 * included must be confirmed against the senders */
	u8 payload[0];	/* variable length payload follows */
} __packed;
346
347
/* Header variant introduced with protocol 95: the 32 bit length field
 * allows bigger packets than p_header80's 16 bit length.  On-the-wire
 * ABI; layout must never change. */
struct p_header95 {
	u16 magic;	/* shortened magic — TODO confirm value vs. h80 */
	u16 command;	/* enum drbd_packets value */
	u32 length;	/* payload length, now 32 bit */
	u8 payload[0];	/* variable length payload follows */
} __packed;
354
355union p_header {
356 struct p_header80 h80;
357 struct p_header95 h95;
358};
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377#define DP_HARDBARRIER 1
378#define DP_RW_SYNC 2
379#define DP_MAY_SET_IN_SYNC 4
380#define DP_UNPLUG 8
381#define DP_FUA 16
382#define DP_FLUSH 32
383#define DP_DISCARD 64
384
385struct p_data {
386 union p_header head;
387 u64 sector;
388 u64 block_id;
389 u32 seq_num;
390 u32 dp_flags;
391} __packed;
392
393
394
395
396
397
398
399
400
401struct p_block_ack {
402 struct p_header80 head;
403 u64 sector;
404 u64 block_id;
405 u32 blksize;
406 u32 seq_num;
407} __packed;
408
409
410struct p_block_req {
411 struct p_header80 head;
412 u64 sector;
413 u64 block_id;
414 u32 blksize;
415 u32 pad;
416} __packed;
417
418
419
420
421
422
423
424
425
426
/* Handshake packet, exchanged when peers connect, announcing the range
 * of protocol versions each side supports plus feature flags.
 * On-the-wire ABI; the layout must never change. */
struct p_handshake {
	struct p_header80 head;
	u32 protocol_min;	/* lowest protocol version we speak */
	u32 feature_flags;
	u32 protocol_max;	/* highest protocol version we speak */

	/* reserved space for future extensions */
	u32 _pad;
	u64 reserverd[7];	/* [sic] misspelled, but the field is part of
				 * the wire format; the size must not change */
} __packed;
440
441
442struct p_barrier {
443 struct p_header80 head;
444 u32 barrier;
445 u32 pad;
446} __packed;
447
448struct p_barrier_ack {
449 struct p_header80 head;
450 u32 barrier;
451 u32 set_size;
452} __packed;
453
454struct p_rs_param {
455 struct p_header80 head;
456 u32 rate;
457
458
459 char verify_alg[0];
460} __packed;
461
462struct p_rs_param_89 {
463 struct p_header80 head;
464 u32 rate;
465
466 char verify_alg[SHARED_SECRET_MAX];
467 char csums_alg[SHARED_SECRET_MAX];
468} __packed;
469
470struct p_rs_param_95 {
471 struct p_header80 head;
472 u32 rate;
473 char verify_alg[SHARED_SECRET_MAX];
474 char csums_alg[SHARED_SECRET_MAX];
475 u32 c_plan_ahead;
476 u32 c_delay_target;
477 u32 c_fill_target;
478 u32 c_max_rate;
479} __packed;
480
481enum drbd_conn_flags {
482 CF_WANT_LOSE = 1,
483 CF_DRY_RUN = 2,
484};
485
486struct p_protocol {
487 struct p_header80 head;
488 u32 protocol;
489 u32 after_sb_0p;
490 u32 after_sb_1p;
491 u32 after_sb_2p;
492 u32 conn_flags;
493 u32 two_primaries;
494
495
496 char integrity_alg[0];
497
498} __packed;
499
500struct p_uuids {
501 struct p_header80 head;
502 u64 uuid[UI_EXTENDED_SIZE];
503} __packed;
504
505struct p_rs_uuid {
506 struct p_header80 head;
507 u64 uuid;
508} __packed;
509
510struct p_sizes {
511 struct p_header80 head;
512 u64 d_size;
513 u64 u_size;
514 u64 c_size;
515 u32 max_segment_size;
516 u16 queue_order_type;
517 u16 dds_flags;
518} __packed;
519
520struct p_state {
521 struct p_header80 head;
522 u32 state;
523} __packed;
524
525struct p_req_state {
526 struct p_header80 head;
527 u32 mask;
528 u32 val;
529} __packed;
530
531struct p_req_state_reply {
532 struct p_header80 head;
533 u32 retcode;
534} __packed;
535
536struct p_drbd06_param {
537 u64 size;
538 u32 state;
539 u32 blksize;
540 u32 protocol;
541 u32 version;
542 u32 gen_cnt[5];
543 u32 bit_map_gen[5];
544} __packed;
545
546struct p_discard {
547 struct p_header80 head;
548 u64 block_id;
549 u32 seq_num;
550 u32 pad;
551} __packed;
552
553
554
555enum drbd_bitmap_code {
556
557
558
559 RLE_VLI_Bits = 2,
560};
561
562struct p_compressed_bm {
563 struct p_header80 head;
564
565
566
567
568
569 u8 encoding;
570
571 u8 code[0];
572} __packed;
573
574struct p_delay_probe93 {
575 struct p_header80 head;
576 u32 seq_num;
577 u32 offset;
578} __packed;
579
580
581static inline enum drbd_bitmap_code
582DCBP_get_code(struct p_compressed_bm *p)
583{
584 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
585}
586
587static inline void
588DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
589{
590 BUG_ON(code & ~0xf);
591 p->encoding = (p->encoding & ~0xf) | code;
592}
593
594static inline int
595DCBP_get_start(struct p_compressed_bm *p)
596{
597 return (p->encoding & 0x80) != 0;
598}
599
600static inline void
601DCBP_set_start(struct p_compressed_bm *p, int set)
602{
603 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
604}
605
606static inline int
607DCBP_get_pad_bits(struct p_compressed_bm *p)
608{
609 return (p->encoding >> 4) & 0x7;
610}
611
612static inline void
613DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
614{
615 BUG_ON(n & ~0x7);
616 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
617}
618
619
620
621
622
623
624#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
625#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
626#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
627#if (PAGE_SIZE < 4096)
628
629#error "PAGE_SIZE too small"
630#endif
631
632union p_polymorph {
633 union p_header header;
634 struct p_handshake handshake;
635 struct p_data data;
636 struct p_block_ack block_ack;
637 struct p_barrier barrier;
638 struct p_barrier_ack barrier_ack;
639 struct p_rs_param_89 rs_param_89;
640 struct p_rs_param_95 rs_param_95;
641 struct p_protocol protocol;
642 struct p_sizes sizes;
643 struct p_uuids uuids;
644 struct p_state state;
645 struct p_req_state req_state;
646 struct p_req_state_reply req_state_reply;
647 struct p_block_req block_req;
648 struct p_delay_probe93 delay_probe93;
649 struct p_rs_uuid rs_uuid;
650} __packed;
651
652
653enum drbd_thread_state {
654 None,
655 Running,
656 Exiting,
657 Restarting
658};
659
/* Per-thread control block for DRBD's kernel threads
 * (receiver, worker, asender — see struct drbd_conf). */
struct drbd_thread {
	spinlock_t t_lock;		/* protects t_state transitions —
					 * TODO confirm exact coverage */
	struct task_struct *task;	/* the kernel thread, if running */
	struct completion stop;		/* signalled on thread exit */
	enum drbd_thread_state t_state;	/* read locklessly via get_t_state() */
	int (*function) (struct drbd_thread *);	/* thread main function */
	struct drbd_conf *mdev;		/* back pointer to owning device */
	int reset_cpu_mask;		/* request to re-apply cpu affinity */
};
669
/*
 * Read the thread state without taking t_lock.
 * The read barrier orders this load against earlier reads, so a state
 * recently stored by another CPU is observed; exact pairing with the
 * writer side should be confirmed in drbd_thread_start/_drbd_thread_stop.
 */
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	smp_rmb();
	return thi->t_state;
}
679
680
681
682
683
684
685
686
687struct drbd_work;
688typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
689struct drbd_work {
690 struct list_head list;
691 drbd_work_cb cb;
692};
693
694struct drbd_tl_epoch;
/* One application (master) bio and its journey through DRBD:
 * local submission, network mirroring, and transfer-log bookkeeping. */
struct drbd_request {
	struct drbd_work w;		/* work item; w.cb drives state changes */
	struct drbd_conf *mdev;		/* owning device */

	/* bio cloned from master_bio and submitted to the local backing
	 * device — presumably; confirm in drbd_req.c */
	struct bio *private_bio;

	struct hlist_node colision;	/* [sic] hash chain for conflict
					 * detection (tl_hash/app_reads_hash) */
	sector_t sector;		/* start sector of the request */
	unsigned int size;		/* size in bytes */
	unsigned int epoch;		/* barrier/epoch number — TODO confirm
					 * relation to drbd_tl_epoch.br_number */

	/* link into the owning drbd_tl_epoch's request list */
	struct list_head tl_requests;
	struct bio *master_bio;		/* the original bio from upper layers */
	unsigned long rq_state;		/* request state bits (see drbd_req.h) */
	int seq_num;			/* protocol sequence number */
	unsigned long start_time;	/* jiffies at submission — presumably */
};
724
725struct drbd_tl_epoch {
726 struct drbd_work w;
727 struct list_head requests;
728 struct drbd_tl_epoch *next;
729 unsigned int br_number;
730 int n_writes;
731};
732
733struct drbd_request;
734
735
736
737
738
739
740
741
742struct drbd_epoch {
743 struct list_head list;
744 unsigned int barrier_nr;
745 atomic_t epoch_size;
746 atomic_t active;
747 unsigned long flags;
748};
749
750
751enum {
752 DE_HAVE_BARRIER_NUMBER,
753};
754
755enum epoch_event {
756 EV_PUT,
757 EV_GOT_BARRIER_NR,
758 EV_BECAME_LAST,
759 EV_CLEANUP = 32,
760};
761
762struct drbd_wq_barrier {
763 struct drbd_work w;
764 struct completion done;
765};
766
767struct digest_info {
768 int digest_size;
769 void *digest;
770};
771
/* A peer request: data received from (or to be sent to) the peer,
 * backed by a private page chain instead of an application bio. */
struct drbd_epoch_entry {
	struct drbd_work w;		/* work item; w.cb completes the entry */
	struct hlist_node colision;	/* [sic] hash chain for conflict detection */
	struct drbd_epoch *epoch;	/* write epoch this entry belongs to */
	struct drbd_conf *mdev;		/* owning device */
	struct page *pages;		/* chained data pages */
	atomic_t pending_bios;		/* bios in flight for this entry */
	unsigned int size;		/* size in bytes */

	unsigned long flags;		/* EE_* bits, see __EE_* enum above */
	sector_t sector;		/* start sector */
	/* block_id identifies the request on the wire; once a digest has
	 * been computed the same slot holds it (EE_HAS_DIGEST flag —
	 * TODO confirm the flag guards this union) */
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};
788
789
790
791
792
793
794
795enum {
796 __EE_CALL_AL_COMPLETE_IO,
797 __EE_MAY_SET_IN_SYNC,
798
799
800
801 __EE_RESUBMITTED,
802
803
804
805
806 __EE_WAS_ERROR,
807
808
809 __EE_HAS_DIGEST,
810};
811#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
812#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
813#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
814#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
815#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
816
817
818enum {
819 CREATE_BARRIER,
820 SIGNAL_ASENDER,
821 SEND_PING,
822
823 UNPLUG_QUEUED,
824 UNPLUG_REMOTE,
825 MD_DIRTY,
826 DISCARD_CONCURRENT,
827 USE_DEGR_WFC_T,
828 CLUSTER_ST_CHANGE,
829 CL_ST_CHG_SUCCESS,
830 CL_ST_CHG_FAIL,
831 CRASHED_PRIMARY,
832
833
834 WRITE_BM_AFTER_RESYNC,
835 CONSIDER_RESYNC,
836
837 MD_NO_FUA,
838 SUSPEND_IO,
839 BITMAP_IO,
840
841 BITMAP_IO_QUEUED,
842 GO_DISKLESS,
843 WAS_IO_ERROR,
844 RESYNC_AFTER_NEG,
845 NET_CONGESTED,
846
847 CONFIG_PENDING,
848
849 DEVICE_DYING,
850
851
852
853 RESIZE_PENDING,
854
855 CONN_DRY_RUN,
856 GOT_PING_ACK,
857 NEW_CUR_UUID,
858 AL_SUSPENDED,
859};
860
861struct drbd_bitmap;
862
863
864
865
866
867
868
869struct drbd_work_queue {
870 struct list_head q;
871 struct semaphore s;
872 spinlock_t q_lock;
873};
874
875struct drbd_socket {
876 struct drbd_work_queue work;
877 struct mutex mutex;
878 struct socket *socket;
879
880
881 union p_polymorph sbuf;
882 union p_polymorph rbuf;
883};
884
885struct drbd_md {
886 u64 md_offset;
887
888 u64 la_size_sect;
889 u64 uuid[UI_SIZE];
890 u64 device_uuid;
891 u32 flags;
892 u32 md_size_sect;
893
894 s32 al_offset;
895 s32 bm_offset;
896
897
898
899
900
901};
902
903
904#define NL_PACKET(name, number, fields) struct name { fields };
905#define NL_INTEGER(pn,pr,member) int member;
906#define NL_INT64(pn,pr,member) __u64 member;
907#define NL_BIT(pn,pr,member) unsigned member:1;
908#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
909#include "linux/drbd_nl.h"
910
911struct drbd_backing_dev {
912 struct block_device *backing_bdev;
913 struct block_device *md_bdev;
914 struct drbd_md md;
915 struct disk_conf dc;
916 sector_t known_size;
917};
918
919struct drbd_md_io {
920 struct drbd_conf *mdev;
921 struct completion event;
922 int error;
923};
924
925struct bm_io_work {
926 struct drbd_work w;
927 char *why;
928 int (*io_fn)(struct drbd_conf *mdev);
929 void (*done)(struct drbd_conf *mdev, int rv);
930};
931
932enum write_ordering_e {
933 WO_none,
934 WO_drain_io,
935 WO_bdev_flush,
936};
937
938struct fifo_buffer {
939 int *values;
940 unsigned int head_index;
941 unsigned int size;
942};
943
944struct drbd_conf {
945
946 unsigned long flags;
947
948
949 struct net_conf *net_conf;
950 struct syncer_conf sync_conf;
951 struct drbd_backing_dev *ldev __protected_by(local);
952
953 sector_t p_size;
954 struct request_queue *rq_queue;
955 struct block_device *this_bdev;
956 struct gendisk *vdisk;
957
958 struct drbd_socket data;
959 struct drbd_socket meta;
960 int agreed_pro_version;
961 unsigned long last_received;
962 unsigned int ko_count;
963 struct drbd_work resync_work,
964 unplug_work,
965 go_diskless,
966 md_sync_work;
967 struct timer_list resync_timer;
968 struct timer_list md_sync_timer;
969#ifdef DRBD_DEBUG_MD_SYNC
970 struct {
971 unsigned int line;
972 const char* func;
973 } last_md_mark_dirty;
974#endif
975
976
977 union drbd_state new_state_tmp;
978
979 union drbd_state state;
980 wait_queue_head_t misc_wait;
981 wait_queue_head_t state_wait;
982 wait_queue_head_t net_cnt_wait;
983 unsigned int send_cnt;
984 unsigned int recv_cnt;
985 unsigned int read_cnt;
986 unsigned int writ_cnt;
987 unsigned int al_writ_cnt;
988 unsigned int bm_writ_cnt;
989 atomic_t ap_bio_cnt;
990 atomic_t ap_pending_cnt;
991 atomic_t rs_pending_cnt;
992 atomic_t unacked_cnt;
993 atomic_t local_cnt;
994 atomic_t net_cnt;
995 spinlock_t req_lock;
996 struct drbd_tl_epoch *unused_spare_tle;
997 struct drbd_tl_epoch *newest_tle;
998 struct drbd_tl_epoch *oldest_tle;
999 struct list_head out_of_sequence_requests;
1000 struct hlist_head *tl_hash;
1001 unsigned int tl_hash_s;
1002
1003
1004 unsigned long rs_total;
1005
1006 unsigned long rs_failed;
1007
1008 unsigned long rs_start;
1009
1010 unsigned long rs_paused;
1011
1012 unsigned long rs_same_csum;
1013#define DRBD_SYNC_MARKS 8
1014#define DRBD_SYNC_MARK_STEP (3*HZ)
1015
1016 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
1017
1018 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
1019
1020 int rs_last_mark;
1021
1022
1023 sector_t ov_start_sector;
1024
1025 sector_t ov_position;
1026
1027 sector_t ov_last_oos_start;
1028
1029 sector_t ov_last_oos_size;
1030 unsigned long ov_left;
1031 struct crypto_hash *csums_tfm;
1032 struct crypto_hash *verify_tfm;
1033
1034 struct drbd_thread receiver;
1035 struct drbd_thread worker;
1036 struct drbd_thread asender;
1037 struct drbd_bitmap *bitmap;
1038 unsigned long bm_resync_fo;
1039
1040
1041 struct lru_cache *resync;
1042
1043 unsigned int resync_locked;
1044
1045 unsigned int resync_wenr;
1046
1047 int open_cnt;
1048 u64 *p_uuid;
1049 struct drbd_epoch *current_epoch;
1050 spinlock_t epoch_lock;
1051 unsigned int epochs;
1052 enum write_ordering_e write_ordering;
1053 struct list_head active_ee;
1054 struct list_head sync_ee;
1055 struct list_head done_ee;
1056 struct list_head read_ee;
1057 struct list_head net_ee;
1058 struct hlist_head *ee_hash;
1059 unsigned int ee_hash_s;
1060
1061
1062 struct drbd_epoch_entry *last_write_w_barrier;
1063
1064 int next_barrier_nr;
1065 struct hlist_head *app_reads_hash;
1066 struct list_head resync_reads;
1067 atomic_t pp_in_use;
1068 atomic_t pp_in_use_by_net;
1069 wait_queue_head_t ee_wait;
1070 struct page *md_io_page;
1071 struct page *md_io_tmpp;
1072 struct mutex md_io_mutex;
1073 spinlock_t al_lock;
1074 wait_queue_head_t al_wait;
1075 struct lru_cache *act_log;
1076 unsigned int al_tr_number;
1077 int al_tr_cycle;
1078 int al_tr_pos;
1079 struct crypto_hash *cram_hmac_tfm;
1080 struct crypto_hash *integrity_w_tfm;
1081 struct crypto_hash *integrity_r_tfm;
1082 void *int_dig_out;
1083 void *int_dig_in;
1084 void *int_dig_vv;
1085 wait_queue_head_t seq_wait;
1086 atomic_t packet_seq;
1087 unsigned int peer_seq;
1088 spinlock_t peer_seq_lock;
1089 unsigned int minor;
1090 unsigned long comm_bm_set;
1091 cpumask_var_t cpu_mask;
1092 struct bm_io_work bm_io_work;
1093 u64 ed_uuid;
1094 struct mutex state_mutex;
1095 char congestion_reason;
1096 atomic_t rs_sect_in;
1097 atomic_t rs_sect_ev;
1098 int rs_last_sect_ev;
1099 int rs_last_events;
1100
1101 int c_sync_rate;
1102 struct fifo_buffer rs_plan_s;
1103 int rs_in_flight;
1104 int rs_planed;
1105};
1106
1107static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
1108{
1109 struct drbd_conf *mdev;
1110
1111 mdev = minor < minor_count ? minor_table[minor] : NULL;
1112
1113 return mdev;
1114}
1115
/* Return the device minor number of @mdev (inverse of minor_to_mdev()). */
static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129static inline int drbd_get_data_sock(struct drbd_conf *mdev)
1130{
1131 mutex_lock(&mdev->data.mutex);
1132
1133
1134 if (unlikely(mdev->data.socket == NULL)) {
1135 mutex_unlock(&mdev->data.mutex);
1136 return 0;
1137 }
1138 return 1;
1139}
1140
/* Release the data socket mutex taken by a successful
 * drbd_get_data_sock(). */
static inline void drbd_put_data_sock(struct drbd_conf *mdev)
{
	mutex_unlock(&mdev->data.mutex);
}
1145
1146
1147
1148
1149
1150
1151
1152enum chg_state_flags {
1153 CS_HARD = 1,
1154 CS_VERBOSE = 2,
1155 CS_WAIT_COMPLETE = 4,
1156 CS_SERIALIZE = 8,
1157 CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
1158};
1159
1160enum dds_flags {
1161 DDSF_FORCED = 1,
1162 DDSF_NO_RESYNC = 2,
1163};
1164
1165extern void drbd_init_set_defaults(struct drbd_conf *mdev);
1166extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
1167 union drbd_state mask, union drbd_state val);
1168extern void drbd_force_state(struct drbd_conf *, union drbd_state,
1169 union drbd_state);
1170extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
1171 union drbd_state, enum chg_state_flags);
1172extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
1173 enum chg_state_flags, struct completion *done);
1174extern void print_st_err(struct drbd_conf *, union drbd_state,
1175 union drbd_state, int);
1176extern int drbd_thread_start(struct drbd_thread *thi);
1177extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1178#ifdef CONFIG_SMP
1179extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
1180extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
1181#else
1182#define drbd_thread_current_set_cpu(A) ({})
1183#define drbd_calc_cpu_mask(A) ({})
1184#endif
1185extern void drbd_free_resources(struct drbd_conf *mdev);
1186extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
1187 unsigned int set_size);
1188extern void tl_clear(struct drbd_conf *mdev);
1189enum drbd_req_event;
1190extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
1191extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
1192extern void drbd_free_sock(struct drbd_conf *mdev);
1193extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
1194 void *buf, size_t size, unsigned msg_flags);
1195extern int drbd_send_protocol(struct drbd_conf *mdev);
1196extern int drbd_send_uuids(struct drbd_conf *mdev);
1197extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
1198extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
1199extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
1200extern int _drbd_send_state(struct drbd_conf *mdev);
1201extern int drbd_send_state(struct drbd_conf *mdev);
1202extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1203 enum drbd_packets cmd, struct p_header80 *h,
1204 size_t size, unsigned msg_flags);
1205#define USE_DATA_SOCKET 1
1206#define USE_META_SOCKET 0
1207extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1208 enum drbd_packets cmd, struct p_header80 *h,
1209 size_t size);
1210extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
1211 char *data, size_t size);
1212extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
1213extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
1214 u32 set_size);
1215extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
1216 struct drbd_epoch_entry *e);
1217extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
1218 struct p_block_req *rp);
1219extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
1220 struct p_data *dp, int data_size);
1221extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
1222 sector_t sector, int blksize, u64 block_id);
1223extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
1224 struct drbd_epoch_entry *e);
1225extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
1226extern int _drbd_send_barrier(struct drbd_conf *mdev,
1227 struct drbd_tl_epoch *barrier);
1228extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1229 sector_t sector, int size, u64 block_id);
1230extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
1231 sector_t sector,int size,
1232 void *digest, int digest_size,
1233 enum drbd_packets cmd);
1234extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
1235
1236extern int drbd_send_bitmap(struct drbd_conf *mdev);
1237extern int _drbd_send_bitmap(struct drbd_conf *mdev);
1238extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
1239extern void drbd_free_bc(struct drbd_backing_dev *ldev);
1240extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
1241
1242
1243extern void drbd_md_sync(struct drbd_conf *mdev);
1244extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
1245
1246extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1247extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1248extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1249extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1250extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
1251extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
1252extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
1253extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1254#ifndef DRBD_DEBUG_MD_SYNC
1255extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
1256#else
1257#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
1258extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
1259 unsigned int line, const char *func);
1260#endif
1261extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
1262 int (*io_fn)(struct drbd_conf *),
1263 void (*done)(struct drbd_conf *, int),
1264 char *why);
1265extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1266extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1267extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
1268extern void drbd_go_diskless(struct drbd_conf *mdev);
1269extern void drbd_ldev_destroy(struct drbd_conf *mdev);
1270
1271
1272
1273
1274
1275
1276
1277#define MD_RESERVED_SECT (128LU << 11)
1278
1279#define MD_AL_OFFSET 8
1280#define MD_AL_MAX_SIZE 64
1281
1282#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
1283
1284
1285#define MD_SECTOR_SHIFT 9
1286#define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
1287
1288
1289#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1)
1290#define AL_EXTENT_SHIFT 22
1291#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1292
1293#if BITS_PER_LONG == 32
1294#define LN2_BPL 5
1295#define cpu_to_lel(A) cpu_to_le32(A)
1296#define lel_to_cpu(A) le32_to_cpu(A)
1297#elif BITS_PER_LONG == 64
1298#define LN2_BPL 6
1299#define cpu_to_lel(A) cpu_to_le64(A)
1300#define lel_to_cpu(A) le64_to_cpu(A)
1301#else
1302#error "LN2 of BITS_PER_LONG unknown!"
1303#endif
1304
1305
1306
1307struct bm_extent {
1308 int rs_left;
1309 int rs_failed;
1310 unsigned long flags;
1311 struct lc_element lce;
1312};
1313
1314#define BME_NO_WRITES 0
1315#define BME_LOCKED 1
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325#define SLEEP_TIME (HZ/10)
1326
1327#define BM_BLOCK_SHIFT 12
1328#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1329
1330
1331#define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)
1332#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1333
1334#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1335#error "HAVE YOU FIXED drbdmeta AS WELL??"
1336#endif
1337
1338
1339#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1340#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1341#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1342
1343
1344#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1345
1346
1347
1348#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1349
1350
1351#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1352#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1353
1354
1355#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1356#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
1357
1358#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
1359#define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1376#define DRBD_MAX_SECTORS_BM \
1377 ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
1378#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
1379#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
1380#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
1381#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1382#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1383#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1384#else
1385#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
1386
1387#if BITS_PER_LONG == 32
1388
1389
1390
1391#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1392#else
1393#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
1394#endif
1395#endif
1396
1397
1398
1399
1400#define HT_SHIFT 8
1401#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
1402
1403#define DRBD_MAX_SIZE_H80_PACKET (1 << 15)
1404
1405
1406#define APP_R_HSIZE 15
1407
/* drbd_bitmap.c — the in-core/on-disk dirty-block bitmap */
extern int drbd_bm_init(struct drbd_conf *mdev);
extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
/* set/clear the bits in the range [s, e] */
extern int drbd_bm_set_bits(
	struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
	struct drbd_conf *mdev, unsigned long s, unsigned long e);
/* variant for use while already holding the bitmap lock */
extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
	const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
	unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
/* underscore-prefixed variants: for use while holding drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
extern int drbd_bm_rs_done(struct drbd_conf *mdev);

/* merge a received little-endian-long buffer into the bitmap */
extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
	size_t number, unsigned long *buffer);

/* extract a little-endian-long buffer from the bitmap (for sending) */
extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
	size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
extern void drbd_bm_unlock(struct drbd_conf *mdev);

extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);

/* drbd_main.c — slab caches, mempools, and the page pool */
extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;
extern struct kmem_cache *drbd_bm_ext_cache;
extern struct kmem_cache *drbd_al_ext_cache;
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

/* pool of pages, chained via page_private (see page_chain_* below) */
extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

extern rwlock_t global_state_lock;

extern struct drbd_conf *drbd_new_device(unsigned int minor);
extern void drbd_free_mdev(struct drbd_conf *mdev);

extern int proc_details;

/* drbd_req.c */
extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

/* drbd_nl.c — configuration / administration entry points */
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
	int force);
extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);

/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);

/* synchronous meta data I/O on a single sector */
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
	struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
1502
1503static inline void ov_oos_print(struct drbd_conf *mdev)
1504{
1505 if (mdev->ov_last_oos_size) {
1506 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
1507 (unsigned long long)mdev->ov_last_oos_start,
1508 (unsigned long)mdev->ov_last_oos_size);
1509 }
1510 mdev->ov_last_oos_size=0;
1511}
1512
1513
/* checksums over a bio / epoch entry payload (csum-based resync, verify) */
extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);

/* worker callbacks (drbd_work.cb signatures) */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);

/* drbd_receiver.c — epoch entry ("ee") management */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev);
extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
	const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
	u64 id,
	sector_t sector,
	unsigned int data_size,
	gfp_t gfp_mask) __must_hold(local);
extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
	int is_net);
/* is_net selects which "in use" accounting the free is charged against */
#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
	struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
	struct list_head *head);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void drbd_flush_workqueue(struct drbd_conf *mdev);
extern void drbd_free_tl_hash(struct drbd_conf *mdev);
1560
1561
1562
1563static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
1564 char __user *optval, int optlen)
1565{
1566 int err;
1567 if (level == SOL_SOCKET)
1568 err = sock_setsockopt(sock, level, optname, optval, optlen);
1569 else
1570 err = sock->ops->setsockopt(sock, level, optname, optval,
1571 optlen);
1572 return err;
1573}
1574
/* Best-effort TCP tuning helpers for the DRBD sockets; return values
 * are deliberately ignored.  NOTE(review): the __user annotation on an
 * on-stack variable silences sparse for the (char __user *) cast —
 * presumably the callers operate with a kernel address space; confirm. */
static inline void drbd_tcp_cork(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int __user val = 0;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	/* NOTE(review): 2 instead of 1 — presumably to keep quickack
	 * mode enabled for longer than a single ACK; confirm */
	int __user val = 2;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char __user *)&val, sizeof(val));
}
1602
/* possibly downgrade the write-ordering method in use (flush/drain/...) */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c — activity log and resync extents */
extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
		sector_t sector, int size);
extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
/* set/clear out-of-sync; the __FILE__/__LINE__ of the caller is passed
 * through for diagnostics */
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);

/* drbd_nl.c — state broadcasts to user space (connector netlink) */
void drbd_nl_cleanup(void);
int __init drbd_nl_init(void);
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char* seen_hash, const char* calc_hash,
		const struct drbd_epoch_entry* e);
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
/*
 * Helpers to build (mask, val) pairs for state change requests.
 * Every field T of union drbd_state has a matching T_MASK with all
 * bits of that field set.
 */
#define role_MASK R_MASK
#define peer_MASK R_MASK
#define disk_MASK D_MASK
#define pdsk_MASK D_MASK
#define conn_MASK C_MASK
#define susp_MASK 1
#define user_isp_MASK 1
#define aftr_isp_MASK 1
#define susp_nod_MASK 1
#define susp_fen_MASK 1

/* NS ("new state"): build mask/val covering one, two or three fields,
 * for use as the (mask, val) arguments of drbd_request_state() etc. */
#define NS(T, S) \
	({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val.T3 = (S3); val; })

/* _NS: build (mdev, new-state) starting from the device's current
 * state, with one, two or three fields changed */
#define _NS(D, T, S) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
1698
1699
1700
1701
1702
1703
/* Pool pages are single-linked through their page_private field. */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
/* safe variant: the current page may be unlinked/freed inside the loop */
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1713
1714static inline int drbd_bio_has_active_page(struct bio *bio)
1715{
1716 struct bio_vec *bvec;
1717 int i;
1718
1719 __bio_for_each_segment(bvec, bio, i, 0) {
1720 if (page_count(bvec->bv_page) > 1)
1721 return 1;
1722 }
1723
1724 return 0;
1725}
1726
1727static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
1728{
1729 struct page *page = e->pages;
1730 page_chain_for_each(page) {
1731 if (page_count(page) > 1)
1732 return 1;
1733 }
1734 return 0;
1735}
1736
1737
/* "state lock": serializes cluster-wide state changes.  Implemented as
 * a flag bit plus wait queue rather than a real lock; may sleep. */
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait,
		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
}

static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
1749
1750static inline int _drbd_set_state(struct drbd_conf *mdev,
1751 union drbd_state ns, enum chg_state_flags flags,
1752 struct completion *done)
1753{
1754 int rv;
1755
1756 read_lock(&global_state_lock);
1757 rv = __drbd_set_state(mdev, ns, flags, done);
1758 read_unlock(&global_state_lock);
1759
1760 return rv;
1761}
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773static inline int drbd_request_state(struct drbd_conf *mdev,
1774 union drbd_state mask,
1775 union drbd_state val)
1776{
1777 return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
1778}
1779
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
/* React to a local disk I/O error according to the configured policy.
 * Callers take mdev->req_lock (see drbd_chk_io_error_() below). */
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
{
	switch (mdev->ldev->dc.on_io_error) {
	case EP_PASS_ON:
		if (!forcedetach) {
			/* rate limited: IO errors tend to come in bursts */
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
			break;
		}
		/* fall through: forcedetach overrides the pass-on policy */
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		if (mdev->state.disk > D_FAILED) {
			/* force the disk to D_FAILED; normal state
			 * sanitizing is bypassed (CS_HARD) */
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}
1802
1803
1804
1805
1806
1807
1808
1809
1810
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
/* Convenience wrapper: only act if an error is actually set, and take
 * mdev->req_lock around the policy handling / state change. */
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, int forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->req_lock, flags);
		__drbd_chk_io_error_(mdev, forcedetach, where);
		spin_unlock_irqrestore(&mdev->req_lock, flags);
	}
}
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1832{
1833 switch (bdev->dc.meta_dev_idx) {
1834 case DRBD_MD_INDEX_INTERNAL:
1835 case DRBD_MD_INDEX_FLEX_INT:
1836 return bdev->md.md_offset + bdev->md.bm_offset;
1837 case DRBD_MD_INDEX_FLEX_EXT:
1838 default:
1839 return bdev->md.md_offset;
1840 }
1841}
1842
1843
1844
1845
1846
1847static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1848{
1849 switch (bdev->dc.meta_dev_idx) {
1850 case DRBD_MD_INDEX_INTERNAL:
1851 case DRBD_MD_INDEX_FLEX_INT:
1852 return bdev->md.md_offset + MD_AL_OFFSET - 1;
1853 case DRBD_MD_INDEX_FLEX_EXT:
1854 default:
1855 return bdev->md.md_offset + bdev->md.md_size_sect;
1856 }
1857}
1858
1859
1860static inline sector_t drbd_get_capacity(struct block_device *bdev)
1861{
1862
1863 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1864}
1865
1866
1867
1868
1869
1870
1871
1872
1873
/* Usable data capacity of the backing device, clipped to what the meta
 * data (bitmap) can cover and the compile-time maxima. */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* internal meta data: everything up to the start of the
		 * meta data area is usable; 0 if there is no device */
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
					drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* additionally clip at what the bitmap within the
		 * flexible external meta data area can cover */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		/* external, fixed-size meta data slot */
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}
1899
1900
1901
1902
1903
1904
/* Sector number of our meta data "super block":
 * - fixed external: slot meta_dev_idx on the meta data device
 * - internal: at the end of the backing device, aligned down to a
 *   multiple of 8 sectors, minus MD_AL_OFFSET
 * - flexible external: at the start (sector 0) of the meta device */
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
				    struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	default: /* external, some index */
		return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
	case DRBD_MD_INDEX_INTERNAL:
		/* same position as flexible internal */
	case DRBD_MD_INDEX_FLEX_INT:
		/* a misconfigured/raced caller may get here without a
		 * backing device; complain (rate limited) and bail */
		if (!bdev->backing_bdev) {
			if (__ratelimit(&drbd_ratelimit_state)) {
				dev_err(DEV, "bdev->backing_bdev==NULL\n");
				dump_stack();
			}
			return 0;
		}
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
			- MD_AL_OFFSET;
	case DRBD_MD_INDEX_FLEX_EXT:
		return 0;
	}
}
1929
1930static inline void
1931drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
1932{
1933 unsigned long flags;
1934 spin_lock_irqsave(&q->q_lock, flags);
1935 list_add(&w->list, &q->q);
1936 up(&q->s);
1937
1938 spin_unlock_irqrestore(&q->q_lock, flags);
1939}
1940
1941static inline void
1942drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1943{
1944 unsigned long flags;
1945 spin_lock_irqsave(&q->q_lock, flags);
1946 list_add_tail(&w->list, &q->q);
1947 up(&q->s);
1948
1949 spin_unlock_irqrestore(&q->q_lock, flags);
1950}
1951
/* Kick the asender thread with DRBD_SIG, but only while SIGNAL_ASENDER
 * indicates it is safe/useful (presumably while it blocks in network
 * I/O — confirm against the asender implementation). */
static inline void wake_asender(struct drbd_conf *mdev)
{
	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
		force_sig(DRBD_SIG, mdev->asender.task);
}

/* Ask the asender to transmit a ping to the peer. */
static inline void request_ping(struct drbd_conf *mdev)
{
	set_bit(SEND_PING, &mdev->flags);
	wake_asender(mdev);
}
1963
/* Send a header-only (no payload) command on the data socket. */
static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
	enum drbd_packets cmd)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}

/* Keep-alive ping / ping-ack travel on the meta data socket. */
static inline int drbd_send_ping(struct drbd_conf *mdev)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}

static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}
1982
/* Wrappers around _drbd_thread_stop(thi, restart, wait).
 * NOTE(review): argument meaning inferred from the wrapper names —
 * confirm against the _drbd_thread_stop() definition. */
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, TRUE);	/* no restart, wait */
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, FALSE);	/* no restart, no wait */
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, TRUE, FALSE);	/* restart, no wait */
}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
/* ap_pending_cnt: number of peer answers still outstanding for
 * application requests.  NOTE(review): the exact inc/dec pairing lives
 * at the call sites in other files — confirm there. */
static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}

/* Complain loudly (but continue) if a reference count went negative —
 * that would indicate unbalanced inc/dec somewhere. */
#define ERR_IF_CNT_IS_NEGATIVE(which)				\
	if (atomic_read(&mdev->which) < 0)			\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			__func__ , __LINE__ ,			\
			atomic_read(&mdev->which))

/* wake misc_wait when the count drops to zero */
#define dec_ap_pending(mdev) do {				\
	typecheck(struct drbd_conf *, mdev);			\
	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
		wake_up(&mdev->misc_wait);			\
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
2036
2037
2038
2039
2040
2041
2042
/* rs_pending_cnt: number of resync-related answers still expected from
 * the peer.  NOTE(review): pairing inferred from the name — confirm at
 * the call sites. */
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}

#define dec_rs_pending(mdev) do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->rs_pending_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
/* unacked_cnt: number of answers we still owe to the peer.
 * NOTE(review): pairing inferred from the name — confirm at call sites. */
static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}

#define dec_unacked(mdev) do {					\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->unacked_cnt);				\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)

#define sub_unacked(mdev, n) do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_sub(n, &mdev->unacked_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
2076
2077
2078static inline void put_net_conf(struct drbd_conf *mdev)
2079{
2080 if (atomic_dec_and_test(&mdev->net_cnt))
2081 wake_up(&mdev->net_cnt_wait);
2082}
2083
2084
2085
2086
2087
2088
2089
/* Pin mdev->net_conf for use; returns non-zero iff a network config is
 * present.  The count is incremented *before* the state check, so a
 * concurrent teardown either sees our reference or we see the state
 * change and drop the reference again. */
static inline int get_net_conf(struct drbd_conf *mdev)
{
	int have_net_conf;

	atomic_inc(&mdev->net_cnt);
	have_net_conf = mdev->state.conn >= C_UNCONNECTED;
	if (!have_net_conf)
		put_net_conf(mdev);
	return have_net_conf;
}
2100
2101
2102
2103
2104
2105
2106
/* Pin the local backing device via a reference count (local_cnt);
 * __cond_lock teaches sparse about the "local" context annotation. */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))

static inline void put_ldev(struct drbd_conf *mdev)
{
	int i = atomic_dec_return(&mdev->local_cnt);
	__release(local);
	D_ASSERT(i >= 0);
	if (i == 0) {
		if (mdev->state.disk == D_DISKLESS)
			/* last reference gone while diskless:
			 * tear down the ldev */
			drbd_ldev_destroy(mdev);
		if (mdev->state.disk == D_FAILED)
			/* disk failed and all references are gone:
			 * proceed towards diskless */
			drbd_go_diskless(mdev);
		wake_up(&mdev->misc_wait);
	}
}
2125
#ifndef __CHECKER__
/* Take a reference on the backing device if the disk state is at least
 * "mins"; returns non-zero iff the reference was taken. */
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never take a reference while fully diskless */
	if (mdev->state.disk == D_DISKLESS)
		return 0;

	/* increment first, then re-check the state: mirrors the
	 * inc-before-check pattern of get_net_conf() */
	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
2144
2145
/* Resync progress: *bits_left = out-of-sync bitmap bits still to go,
 * *per_mil_done = permille completed relative to rs_total. */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/* this code relies on rs_total fitting in an unsigned long */
	typecheck(unsigned long, mdev->rs_total);

	/* bits not yet in sync, minus those that failed and will not be
	 * retried in this resync run */
	*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

	if (*bits_left > mdev->rs_total) {
		/* presumably a transient race while rs_total/rs_failed
		 * change concurrently — warn and report 0 progress */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		/* shift both operands down by 10 bits first — presumably
		 * to keep the *1000 multiplication from overflowing an
		 * unsigned long; confirm for very large devices */
		unsigned long tmp = 1000UL -
				(*bits_left >> 10)*1000UL
				/ ((mdev->rs_total >> 10) + 1UL);
		*per_mil_done = tmp;
	}
}
2181
2182
2183
2184
2185
2186static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
2187{
2188 int mxb = 1000000;
2189 if (get_net_conf(mdev)) {
2190 mxb = mdev->net_conf->max_buffers;
2191 put_net_conf(mdev);
2192 }
2193 return mxb;
2194}
2195
/* May new application I/O be started in this state?  Used to hold back
 * new requests during transitional connection/disk states. */
static inline int drbd_state_is_stable(union drbd_state s)
{
	switch ((enum drbd_conns)s.conn) {
	/* no connection at all ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or a well-established connection */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
		/* connection state is stable */
		break;

	/* transitional network states: hold back new application I/O */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
	case C_WF_BITMAP_S:
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not stable */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
		/* disk state is stable as well */
		break;

	/* transitional disk states */
	case D_ATTACHING:
	case D_FAILED:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not stable */
		return 0;
	}

	return 1;
}
2258
2259static inline int is_susp(union drbd_state s)
2260{
2261 return s.susp || s.susp_nod || s.susp_fen;
2262}
2263
/* Condition for admitting one more application bio.
 * Callers hold mdev->req_lock (see inc_ap_bio()). */
static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);

	/* no new I/O while suspended, by state or by explicit request */
	if (is_susp(mdev->state))
		return 0;
	if (test_bit(SUSPEND_IO, &mdev->flags))
		return 0;

	/* only admit new application I/O during "stable" states */
	if (!drbd_state_is_stable(mdev->state))
		return 0;

	/* throttle: at most max_buffers application bios in flight */
	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return 0;
	/* hold back while bitmap I/O is pending/in progress */
	if (test_bit(BITMAP_IO, &mdev->flags))
		return 0;
	return 1;
}
2289
2290
2291
2292
/* Account "count" new application bios; sleeps until
 * __inc_ap_bio_cond() admits them.  Open-coded wait loop under
 * req_lock so the condition check and the increment are atomic with
 * respect to dec_ap_bio() and state changes. */
static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
	DEFINE_WAIT(wait);

	spin_lock_irq(&mdev->req_lock);
	while (!__inc_ap_bio_cond(mdev)) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		schedule();
		finish_wait(&mdev->misc_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
	atomic_add(count, &mdev->ap_bio_cnt);
	spin_unlock_irq(&mdev->req_lock);
}
2318
/* One application bio done: drop the count, wake throttled submitters,
 * and kick queued bitmap I/O once the last bio has drained. */
static inline void dec_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);
	/* wakes on (almost) every dec; some hysteresis might be nicer */
	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		/* queue the pending bitmap work exactly once */
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
}
2335
/* Set mdev->ed_uuid.  NOTE(review): "ed" presumably means the UUID of
 * the currently exposed data generation — confirm against the users. */
static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
	mdev->ed_uuid = val;
}
2340
2341static inline int seq_cmp(u32 a, u32 b)
2342{
2343
2344
2345
2346
2347
2348 return (s32)(a) - (s32)(b);
2349}
/* convenience wrappers around seq_cmp() */
#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
/* the larger of two values in circular sequence-number order */
#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
2356
2357static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
2358{
2359 unsigned int m;
2360 spin_lock(&mdev->peer_seq_lock);
2361 m = seq_max(mdev->peer_seq, new_seq);
2362 mdev->peer_seq = m;
2363 spin_unlock(&mdev->peer_seq_lock);
2364 if (m == new_seq)
2365 wake_up(&mdev->seq_wait);
2366}
2367
/* Flag the data socket as congested once more than 4/5 of its send
 * buffer is queued. */
static inline void drbd_update_congested(struct drbd_conf *mdev)
{
	struct sock *sk = mdev->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &mdev->flags);
}
2374
/* DRBD exposes no I/O ordering guarantees of the lower device. */
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
	/* provide a compatibility fallback if the kernel headers in use
	 * no longer (or do not yet) define QUEUE_ORDERED_NONE */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}
2384
2385static inline void drbd_blk_run_queue(struct request_queue *q)
2386{
2387 if (q && q->unplug_fn)
2388 q->unplug_fn(q);
2389}
2390
2391static inline void drbd_kick_lo(struct drbd_conf *mdev)
2392{
2393 if (get_ldev(mdev)) {
2394 drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
2395 put_ldev(mdev);
2396 }
2397}
2398
/* Flush the volatile write cache of the meta data device.  On failure,
 * set MD_NO_FUA and stop issuing flushes from then on. */
static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (test_bit(MD_NO_FUA, &mdev->flags))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
	if (r) {
		set_bit(MD_NO_FUA, &mdev->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}
2412
2413#endif
2414