/*
 * drbd_int.h
 *
 * Shared declarations for the DRBD (Distributed Replicated Block Device)
 * kernel driver.
 */
#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

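/*
 * Annotations for the sparse static checker: when building with sparse
 * (__CHECKER__) these expand to context-tracking attributes so that lock
 * protection of fields can be verified; in a regular build they are no-ops.
 */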
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];

#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC (4711ULL)
#define ID_OUT_OF_SYNC (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			__drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			__drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
			__drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
			__drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

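/* Log an assertion failure (does not BUG); the condition is expected to hold. */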
#define D_ASSERT(device, exp) do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

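/*
 * Like D_ASSERT(), but evaluates to the condition so it can be used in
 * expressions; requires a variable named "device" in scope.
 */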
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})

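/*
 * Fault injection types, used with drbd_insert_fault() below.
 * The enable_faults module parameter is interpreted as a bitmask
 * over these values.
 */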
enum {
	DRBD_FAULT_MD_WR = 0,
	DRBD_FAULT_MD_RD = 1,
	DRBD_FAULT_RS_WR = 2,
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,
	DRBD_FAULT_BM_ALLOC = 7,
	DRBD_FAULT_AL_EE = 8,
	DRBD_FAULT_RECEIVE = 9,

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
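/*
 * Typical use, as a sketch (compare drbd_generic_make_request() below):
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *		bio_io_error(bio);
 *	else
 *		generic_make_request(bio);
 */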

#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;
extern struct list_head drbd_resources;

extern const char *cmdname(enum drbd_packet cmd);

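/*
 * Context of an ongoing bitmap exchange with the peer: total bitmap size,
 * current position in bits and words, and per-kind statistics (the two
 * array slots presumably distinguish plain from RLE-compressed transfers).
 */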
struct bm_xfer_ctx {
	unsigned long bm_bits;
	unsigned long bm_words;

	unsigned long bit_offset;
	unsigned long word_offset;

	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

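/*
 * Convert the current bit offset into a word offset.  The bitmap is sent
 * in 64-bit chunks regardless of the host word size, so on 32-bit the word
 * offset is rounded down to an even number of 32-bit words.
 */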
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

extern unsigned int drbd_header_size(struct drbd_connection *connection);

enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

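/*
 * One drbd_request per application (master) bio.  private_bio is the clone
 * submitted to the local backing device; the *_jif fields record jiffies
 * timestamps of the request's progress through the state machine.
 */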
struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	struct bio *private_bio;

	struct drbd_interval i;

	unsigned int epoch;

	struct list_head tl_requests;
	struct bio *master_bio;

	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	unsigned long start_jif;

	unsigned long in_actlog_jif;

	unsigned long pre_submit_jif;

	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	atomic_t completion_ref;

	struct kref kref;

	unsigned rq_state;
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;
	atomic_t active;
	unsigned long flags;
};

int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32,
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch;
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;

	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

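/* Bit numbers for drbd_peer_request->flags; the EE_* defines below are the
 * corresponding bit masks. */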
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	__EE_IS_TRIM,

	__EE_IS_TRIM_USE_ZEROOUT,

	__EE_RESUBMITTED,

	__EE_WAS_ERROR,

	__EE_HAS_DIGEST,

	__EE_RESTART_REQUESTS,

	__EE_SEND_WRITE_ACK,

	__EE_IN_INTERVAL_TREE,

	__EE_SUBMITTED,

	__EE_WRITE,

	__EE_WRITE_SAME,

	__EE_APPLICATION,

	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM (1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)

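/* Bit numbers stored in device->flags. */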
enum {
	UNPLUG_REMOTE,
	MD_DIRTY,
	USE_DEGR_WFC_T,
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,

	CONSIDER_RESYNC,

	MD_NO_FUA,

	BITMAP_IO,

	BITMAP_IO_QUEUED,
	WAS_IO_ERROR,
	WAS_READ_ERROR,
	FORCE_DETACH,
	RESYNC_AFTER_NEG,
	RESIZE_PENDING,

	NEW_CUR_UUID,
	AL_SUSPENDED,
	AHEAD_TO_SYNC_SOURCE,
	B_RS_H_DONE,
	DISCARD_MY_DATA,
	READ_BALANCE_RR,

	FLUSH_PENDING,

	GOING_DISKLESS,

	GO_DISKLESS,
	DESTROY_DISK,
	MD_SYNC,
	RS_START,
	RS_PROGRESS,
	RS_DONE,
};

struct drbd_bitmap;

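/*
 * While the bitmap is locked, only the operations not excluded by the
 * BM_DONT_* bits are allowed; the BM_LOCKED_*_ALLOWED values name the
 * common locking modes.
 */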
enum bm_flag {
	BM_LOCKED_MASK = 0xf,

	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	BM_IS_LOCKED = 0x8,

	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;

	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;

	u64 la_size_sect;
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;
	s32 bm_offset;

	s32 meta_dev_idx;

	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k;
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf;
	sector_t known_size;
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;
	unsigned long submit_jif;
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

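/*
 * FIFO used by the dynamic resync-rate controller; values[] is a
 * variable-size array, allocated by fifo_alloc() with fifo_size slots.
 */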
struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total;
	int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);

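/* Bit numbers stored in connection->flags. */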
enum {
	NET_CONGESTED,
	RESOLVE_CONFLICTS,
	SEND_PING,
	GOT_PING_ACK,
	CONN_WD_ST_CHG_REQ,
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,
	CREATE_BARRIER,
	STATE_SENT,
	CALLBACK_PENDING,

	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,
};

enum which_state { NOW, OLD = NOW, NEW };

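/*
 * A resource groups the volumes (devices) that are replicated together
 * and the connections to their peers.
 */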
struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;
	struct mutex adm_mutex;
	spinlock_t req_lock;

	unsigned susp:1;
	unsigned susp_nod:1;
	unsigned susp_fen:1;

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

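/*
 * One connection per peer; it owns the data and meta (ack) sockets and
 * the receiver, worker and ack_receiver threads.
 */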
struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;
	enum drbd_conns cstate;
	struct mutex cstate_mutex;
	unsigned int connect_cnt;

	unsigned long flags;
	struct net_conf *net_conf;
	wait_queue_head_t ping_wait;

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;
	struct drbd_socket meta;
	int agreed_pro_version;
	u32 agreed_features;
	unsigned long last_received;
	unsigned int ko_count;

	struct list_head transfer_log;

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;
	struct crypto_ahash *peer_integrity_tfm;
	struct crypto_ahash *csums_tfm;
	struct crypto_ahash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;
	unsigned current_tle_writes;

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST 16
	unsigned int w_cb_nr;
	unsigned int r_cb_nr;
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		bool seen_any_write_yet;

		int current_epoch_nr;

		unsigned current_epoch_writes;
	} send;
};

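/* Test for the existence of net_conf; the RCU read lock is only needed
 * for the duration of the dereference itself. */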
static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

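/* One drbd_device per replicated volume (block device minor). */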
struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;
	unsigned int minor;

	struct kref kref;

	unsigned long flags;

	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;
	atomic_t ap_actlog_cnt;
	atomic_t ap_pending_cnt;
	atomic_t rs_pending_cnt;
	atomic_t unacked_cnt;
	atomic_t local_cnt;
	atomic_t suspend_cnt;

	struct rb_root read_requests;
	struct rb_root write_requests;

	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	bool use_csums;

	unsigned long rs_total;

	unsigned long rs_failed;

	unsigned long rs_start;

	unsigned long rs_paused;

	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)

	unsigned long rs_mark_left[DRBD_SYNC_MARKS];

	unsigned long rs_mark_time[DRBD_SYNC_MARKS];

	int rs_last_mark;
	unsigned long rs_last_bcast;

	sector_t ov_start_sector;
	sector_t ov_stop_sector;

	sector_t ov_position;

	sector_t ov_last_oos_start;

	sector_t ov_last_oos_size;
	unsigned long ov_left;

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo;

	struct lru_cache *resync;

	unsigned int resync_locked;

	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;
	struct list_head sync_ee;
	struct list_head done_ee;
	struct list_head read_ee;
	struct list_head net_ee;

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;
	atomic_t pp_in_use_by_net;
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set;
	struct bm_io_work bm_io_work;
	u64 ed_uuid;
	struct mutex own_state_mutex;
	struct mutex *state_mutex;
	char congestion_reason;
	atomic_t rs_sect_in;
	atomic_t rs_sect_ev;
	int rs_last_sect_ev;
	int rs_last_events;

	int c_sync_rate;
	struct fifo_buffer *rs_plan_s;
	int rs_in_flight;
	atomic_t ap_in_flight;
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list;
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES 1
#define BM_AIO_WRITE_HINTED 2
#define BM_AIO_WRITE_ALL_PAGES 4
#define BM_AIO_READ 8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	unsigned int minor;

	unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)

	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	struct sk_buff *reply_skb;

	struct drbd_genlmsghdr *reply_dh;

	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

enum dds_flags {
	DDSF_FORCED = 1,
	DDSF_NO_RESYNC = 2,
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
		unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
		u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
		struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
		struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
		struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
		sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
		struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
		sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
		int size, void *digest, int digest_size,
		enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		void (*done)(struct drbd_device *, int),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

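/*
 * On-disk meta data sizing, in 512-byte sectors: with an indexed external
 * meta data device, each index occupies a 128 MiB slot.
 */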
#define MD_128MB_SECT (128LLU << 11)
#define MD_4kB_SECT 8
#define MD_32kB_SECT 64

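/* One activity log extent covers 1<<22 bytes = 4 MiB of application data. */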
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

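/*
 * Each 4k activity log transaction block carries up to 64 updates plus
 * up to 919 additional context extent numbers.
 */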
#define AL_UPDATES_PER_TRANSACTION 64
#define AL_CONTEXT_PER_TRANSACTION 919

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

struct bm_extent {
	int rs_left;
	int rs_failed;
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES 0
#define BME_LOCKED 1
#define BME_PRIORITY 2

#define SLEEP_TIME (HZ/10)

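/*
 * Bitmap granularity: one bit per 4 KiB (1<<12) of storage; one bitmap
 * extent covers 16 MiB (1<<24).
 */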
#define BM_BLOCK_SHIFT 12
#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)

#define BM_EXT_SHIFT 24
#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
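/*
 * With BM_BLOCK_SHIFT == 12 one bit covers 2^(12-9) = 8 sectors, so
 * BM_SECT_TO_BIT(x) is x >> 3 and BM_BIT_TO_SECT(x) is x << 3.
 */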

#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))

#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)

#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)

#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

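/* Limit of a 32-bit sector_t: 0xffffffff sectors of 512 bytes, just under 2 TiB. */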
#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

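/*
 * With a fixed 128 MiB meta data area (minus 32 KiB activity log and 4 KiB
 * superblock), each remaining 512-byte sector of bitmap covers
 * 4096 bits * 4 KiB = 16 MiB = 1<<(BM_EXT_SHIFT-9) sectors of data.
 */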
#define DRBD_MAX_SECTORS_FIXED_BM \
	((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM

#if BITS_PER_LONG == 32

#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else

#define DRBD_MAX_SECTORS_FLEX (1UL << 51)

#endif
#endif

#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17)

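/*
 * Largest "batch" bio: half the updates of one AL transaction (64/2 = 32)
 * times 4 MiB per AL extent = 128 MiB.
 */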
#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)

extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);

extern int drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
		struct drbd_device *device, const unsigned long s, const unsigned long e);

extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP (~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);

extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);

extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;
extern struct kmem_cache *drbd_bm_ext_cache;
extern struct kmem_cache *drbd_al_ext_cache;
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

#define DRBD_MIN_POOL_PAGES 128
extern mempool_t *drbd_md_io_page_pool;

extern struct bio_set *drbd_md_io_bio_set;

extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern struct mutex resources_mutex;

extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
		void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern int proc_details;

extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

extern struct mutex notification_mutex;

extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
		struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
		enum drbd_role new_role,
		int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);

extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)device->ov_last_oos_start,
			(unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);

extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);

extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
		sector_t start, unsigned int nr_sectors, bool discard);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
		struct drbd_peer_request *, const unsigned,
		const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
		sector_t, unsigned int,
		unsigned int,
		gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
		int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

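/*
 * Thin wrappers around kernel_setsockopt() for tuning the DRBD sockets.
 * Corking is used to coalesce header and payload into fewer segments;
 * TCP_QUICKACK only distinguishes zero from nonzero, so the value 2
 * behaves like 1.
 */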
static inline void drbd_tcp_cork(struct socket *sock)
{
	int val = 1;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int val = 0;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int val = 1;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int val = 2;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char*)&val, sizeof(val));
}

static inline void drbd_set_my_capacity(struct drbd_device *device,
		sector_t size)
{
	set_capacity(device->vdisk, size);
	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}

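/* Submit a bio to the local backing device, honoring fault injection. */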
static inline void drbd_generic_make_request(struct drbd_device *device,
		int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_bdev) {
		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
		bio->bi_error = -ENODEV;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		generic_make_request(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
		enum write_ordering_e wo);

extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;

extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);

struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern void notify_resource_state(struct sk_buff *,
		unsigned int,
		struct drbd_resource *,
		struct resource_info *,
		enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
		unsigned int,
		struct drbd_device *,
		struct device_info *,
		enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
		unsigned int,
		struct drbd_connection *,
		struct connection_info *,
		enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
		unsigned int,
		struct drbd_peer_device *,
		struct peer_device_info *,
		enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
		struct drbd_connection *, const char *, int);

static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON:
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
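		/* fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */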
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
		int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}

static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT -1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect -1;
	}
}

static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));

		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				- bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);

static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		force_sig(SIGXCPU, task);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
		enum drbd_packet, unsigned int, void *,
		unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
		enum drbd_packet, unsigned int, void *,
		unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

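/*
 * In-flight accounting.  Each inc_*() must be balanced by the matching
 * dec_*(); roughly: ap_pending counts application requests waiting for a
 * peer ack, rs_pending counts resync-related requests to the peer, and
 * unacked counts peer requests we still owe an ack for.
 */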
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
	if (atomic_read(&device->which) < 0) \
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
			func, line, \
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_target_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_TARGET ||
	       connection_state == C_PAUSED_SYNC_T;
}

static inline bool is_sync_source_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_SOURCE ||
	       connection_state == C_PAUSED_SYNC_S;
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return is_sync_source_state(connection_state) ||
	       is_sync_target_state(connection_state);
}

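/*
 * Reference counting for the local backing device (device->ldev):
 * get_ldev()/get_ldev_if_state() take a reference if the disk state is at
 * least the requested one, put_ldev() drops it; the last put may trigger
 * the transition to diskless.
 */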
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;

	int i = atomic_dec_return(&device->local_cnt);

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	switch ((enum drbd_conns)s.conn) {
	case C_STANDALONE:
	case C_WF_CONNECTION:

	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:

	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		break;

	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	if (!drbd_state_is_stable(device))
		return false;

	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
			struct drbd_connection, connections);
}

#endif