/*
 * drbd_int.h
 *
 * This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
 */
#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)          __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes. */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)
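
/*
 * Usage sketch (illustrative only, not part of the API proper):
 * drbd_printk() dispatches on the static type of its object argument,
 * so the same logging call works for any of the four object kinds:
 *
 *	drbd_err(device, "Local IO failed, sector %llu\n",
 *		 (unsigned long long)sector);
 *	drbd_warn(connection, "peer did not respond to ping\n");
 *
 * Passing any other pointer type references the undefined function
 * drbd_printk_with_wrong_object_type() and fails at link time.
 */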

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 * Note that "device" is taken from the surrounding scope.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
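
/*
 * Fault-injection usage sketch (illustrative only): callers gate an
 * artificial failure on the fault type before doing real I/O, the same
 * way drbd_generic_make_request() below does:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_RS_RD))
 *		bio_io_error(bio);
 *	else
 *		generic_make_request(bio);
 *
 * With CONFIG_DRBD_FAULT_INJECTION disabled this compiles down to the
 * plain submission path.
 */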

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;		/* RCU, updates: genl_lock() */
extern struct list_head drbd_resources;	/* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const" members: last bit and word of the on-disk bitmap */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* current position within that bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics: plain vs. rle-compressed transfers */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
			       const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" of the bitmap, which are
	 * 64 bit wide on a 64-bit architecture and 32 bit wide on 32-bit.
	 * On the wire we always transfer full 64-bit words, so on 32-bit
	 * we round the word offset down to an even number of 32-bit
	 * words, i.e. to a 64-bit boundary. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
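
/*
 * Worked example: with bit_offset = 200, a 64-bit build computes
 * word_offset = 200 >> 6 = 3 (byte offset 3 * 8 = 24).  A 32-bit build
 * computes 200 >> 5 = 6, which is already even, so the &= ~1UL step
 * keeps word 6 (byte offset 6 * 4 = 24) -- the same position in the
 * on-the-wire stream.
 */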

extern unsigned int drbd_header_size(struct drbd_connection *connection);

enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock. */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);
struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a P_BARRIER packet to be sent, starting a new epoch. */
	unsigned int epoch;

	struct list_head tl_requests;	/* ring list in the transfer log */
	struct bio *master_bio;		/* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* jiffies when added to the activity log */
	unsigned long in_actlog_jif;

	/* jiffies of local bio submit */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state;
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;	/* increased on every request added. */
	atomic_t active;	/* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declaration of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32,	/* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch;	/* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,
	/* our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_IS_TRIM_USE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM		(1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT	(1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_APPLICATION		(1<<__EE_APPLICATION)

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach, to "pretend" a local io-error */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to carefully destroy the actual disk */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing
	 * bits would be unexpected (used during resync handshake) */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* everything allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
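
/*
 * Locking sketch (illustrative only): a bulk bitmap operation brackets
 * its I/O with drbd_bm_lock()/drbd_bm_unlock() (declared further down),
 * picking the weakest mode that still protects it, e.g.
 *
 *	drbd_bm_lock(device, "resync handshake", BM_LOCKED_SET_ALLOWED);
 *	drbd_bm_write(device);
 *	drbd_bm_unlock(device);
 *
 * Other contexts may then still test and set bits, but attempts to
 * clear bits while the lock is held would be flagged.
 */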

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;	/* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* activity log stripe layout; see drbd_actlog.c */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[0]; /* trailing flexible array, sized by fifo_alloc() */
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
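
/*
 * Allocation sketch (illustrative only): fifo_alloc() allocates the
 * struct together with its trailing values[] array, so a plan with n
 * slots is obtained and published in one step, e.g.
 *
 *	struct fifo_buffer *plan = fifo_alloc(n);
 *	if (!plan)
 *		return -ENOMEM;
 *	rcu_assign_pointer(device->rs_plan_s, plan);
 *
 * The resync-controller plan (rs_plan_s, see struct drbd_device below)
 * is the typical user of this FIFO.
 */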

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context. */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* for ready-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* serializes administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};
struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;	/* checksums we compute */
	struct crypto_ahash *peer_integrity_tfm;	/* checksums we verify */
	struct crypto_ahash *csums_tfm;
	struct crypto_ahash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;

	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;          /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;	/* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval trees of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* blocks not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU) */
	int rs_in_flight; /* resync sectors in flight */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointers into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}
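
/*
 * Iteration sketch (illustrative only): walking all connections of all
 * resources under RCU, e.g. to look one up by peer address:
 *
 *	rcu_read_lock();
 *	for_each_resource_rcu(resource, &drbd_resources) {
 *		struct drbd_connection *connection;
 *		for_each_connection_rcu(connection, resource) {
 *			... inspect connection->peer_addr ...
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * The _safe variants are for teardown paths that unlink entries while
 * iterating; the plain variants assume the appropriate lock is held.
 */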

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts, with offsets in
 * (512 byte) sectors:
 * external:
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 * internal:
 *   [data ...][ Bitmap ][ activity log ][ 4k superblock ]
 * The fixed-size variant reserves 128 MiB of meta data.
 */

/* 128 MiB, expressed in 512 byte sectors */
#define MD_128MB_SECT (128LLU << 11)
/* 4 kiB and 32 kiB, expressed in 512 byte sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4 MiB of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* updates per transaction:
 *   This many changes to the active set can be logged with one transaction.
 * context per transaction:
 *   This many context extent numbers are logged with each transaction.
 */
#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left;		/* number of bits set (out of sync) in this extent. */
	int rs_failed;		/* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES	0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED	1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY	2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how many _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)

/* how many activity log extents fit into one bitmap extent */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
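
/*
 * Worked example of the shift arithmetic above: one bit covers
 * 2^(12-9) = 8 sectors of 512 bytes = 4 KiB, so storage sector 4711
 * maps to bitmap bit 4711 >> 3 = 588.  One resync extent covers
 * 2^(24-9) = 32768 sectors = 16 MiB, i.e. 2^(24-12) = 4096 bits, and
 * holds 2^(24-22) = 4 activity-log extents of 4 MiB each.
 */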

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

/* we have a certain meta data variant that has a fixed on-disk size of
 * 128 MiB, of which 4k are the "superblock" and 32k the fixed-size
 * activity log; the rest holds the bitmap, limiting the device size. */
#define DRBD_MAX_SECTORS_FIXED_BM \
	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
#endif
#endif

/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)    /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than one activity log extent worth of data
 * to be discarded in one go. We may need to rework drbd_al_begin_io()
 * to allow for even larger discard ranges */
#define DRBD_MAX_DISCARD_SIZE	AL_EXTENT_SIZE
#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)

extern int  drbd_bm_init(struct drbd_device *device);
extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int  drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int  drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);

/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from the peer,
 * or data requested by the peer.
 *
 * This does not have an emergency reserve.
 *
 * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will try to allocate from the system.
 *
 * The assumption is that pages taken from this pool will be processed,
 * and given back, "quickly", and then can be recycled, so we can avoid
 * frequent calls to alloc_page(), and still will be able to make progress
 * even under memory pressure.
 */
extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

/* We also need a standard (emergency-reserve backed) page pool
 * for meta data IO (activity log, bitmap).
 * We can keep it global, as long as it is used as "N pages at a time".
 * 128 should be plenty, currently we probably can get away with as few as 1.
 */
#define DRBD_MIN_POOL_PAGES	128
extern mempool_t *drbd_md_io_page_pool;

/* We also need to make sure we get a bio
 * when we need it for housekeeping purposes */
extern struct bio_set *drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern struct mutex resources_mutex;

extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					    void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern int proc_details;

/* drbd_req.c */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern struct mutex notification_mutex;

extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

/* drbd_worker.c */
/* bi_end_io handlers */
extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);

extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
		     (unsigned long long)device->ov_last_oos_start,
		     (unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);

/* drbd_receiver.c */
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
				    struct drbd_peer_request *, const unsigned,
				    const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
						     sector_t, unsigned int,
						     bool,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

static inline void drbd_tcp_cork(struct socket *sock)
{
	int val = 1;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int val = 0;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int val = 1;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char*)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int val = 2;
	(void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char*)&val, sizeof(val));
}
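
/*
 * Corking sketch (illustrative only): to coalesce a burst of small
 * packets into fewer TCP segments, senders bracket the burst:
 *
 *	drbd_tcp_cork(connection->data.socket);
 *	... several drbd_send() calls ...
 *	drbd_tcp_uncork(connection->data.socket);
 *
 * TCP_QUICKACK is a transient hint: it forces prompt ACKs for the
 * moment rather than permanently disabling delayed ACK.
 */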

/* sets the number of 512 byte sectors of our virtual device */
static inline void drbd_set_my_capacity(struct drbd_device *device,
					sector_t size)
{
	/* set_capacity expects units of 512 byte sectors */
	set_capacity(device->vdisk, size);
	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}

/*
 * used to submit our private bio
 */
static inline void drbd_generic_make_request(struct drbd_device *device,
					     int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_bdev) {
		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
		bio->bi_error = -ENODEV;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		generic_make_request(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
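
/*
 * Usage sketch (illustrative only): all three wrappers funnel into
 * __drbd_change_sync(), which flips the bitmap bits covering the byte
 * range [sector << 9, (sector << 9) + size), e.g.
 *
 *	if (drbd_set_out_of_sync(device, sector, size))
 *		... some bits changed, the peers now diverge here ...
 *
 * Note the mismatch in granularity: size is in bytes, while the bitmap
 * tracks 4 KiB (BM_BLOCK_SIZE) blocks.
 */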
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);

/* drbd_nl.c */
/* state info broadcast */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern void notify_resource_state(struct sk_buff *,
				  unsigned int,
				  struct drbd_resource *,
				  struct resource_info *,
				  enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
				unsigned int,
				struct drbd_device *,
				struct device_info *,
				enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
				    unsigned int,
				    struct drbd_connection *,
				    struct connection_info *,
				    enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
				     unsigned int,
				     struct drbd_peer_device *,
				     struct peer_device_info *,
				     enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
			  struct drbd_connection *, const char *, int);

/*
 * inline helper functions
 *************************/

/* see also page_chain_add and friends in drbd_receiver.c */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)


static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
	case EP_DETACH:
	case EP_CALL_HELPER:
		/* Remember whether we saw a READ or WRITE error.
		 *
		 * Recovery of the affected area for WRITE failure is covered
		 * by the activity log.
		 * READ errors may fall outside that area though. Certain READ
		 * errors can be "healed" by writing good data to the affected
		 * blocks, which triggers block re-allocation in lower layers.
		 *
		 * If we can not write the bitmap after a READ error,
		 * we may need to trigger a full sync.
		 *
		 * Force-detach is not really an IO error, but rather a
		 * desperate measure to detach, if the local backing device
		 * "at least used to work before crashing". */
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error: Handle the on_io_error setting, should be called from
 * all io completion handlers
 * @device:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback
 * @forcedetach: Force detach (do not try to "pass on" the error)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}


/**
 * drbd_md_first_sector() - Return the first sector of the meta data area
 * @bdev:	Meta data block device.
 *
 * For internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * We clip ourselves at the various MAX_SECTORS, because if we don't,
 * the current implementation will oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

/**
 * drbd_md_ss() - Return the sector number of our meta data super block
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	/* Since drbd08, internal meta data is always "flexible".
	 * position: last 4k aligned block of 4k size */
	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	/* external, some index; this is the old fixed size layout */
	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);

/* To get the ack_receiver out of the blocking network stack,
 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
 * and send a ping, we need to send a signal.
 * Which signal we send is irrelevant. */
static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		force_sig(SIGXCPU, task);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, DATA_RECEIVED)
 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
 */
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
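
/*
 * Pairing sketch (illustrative only): every inc_ap_pending() must be
 * balanced by exactly one dec_ap_pending() once the expected answer
 * (or its failure) is in, e.g. around sending a data request:
 *
 *	inc_ap_pending(device);
 *	err = drbd_send_drequest(peer_device, P_DATA_REQUEST,
 *				 sector, size, (unsigned long)req);
 *	if (err)
 *		dec_ap_pending(device);
 *
 * _dec_ap_pending() wakes misc_wait when the count reaches zero and
 * complains loudly if it ever goes negative.
 */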

/* counts how many resync-related answers we still expect from the peer
 *		     increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					 (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return
	   (connection_state == C_SYNC_SOURCE
	||  connection_state == C_SYNC_TARGET
	||  connection_state == C_PAUSED_SYNC_S
	||  connection_state == C_PAUSED_SYNC_T);
}

/**
 * get_ldev_if_state() - Increase the ref count on device->ldev.
 * Returns false if there is no ldev.
 * @_device:	DRBD device.
 * @_min_state:	Minimum device state required for success.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
#define get_ldev_if_state(_device, _min_state)				\
	(_get_ldev_if_state((_device), (_min_state)) ?			\
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	/* We must check the state *before* the atomic_dec becomes visible,
	 * or we have a theoretical race where someone hitting zero,
	 * while state still D_FAILED, will then see D_DISKLESS in the
	 * condition below and call into destroy, where he must not, yet. */
	int i = atomic_dec_return(&device->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			/* all application IO references gone. */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
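
/*
 * Guard-pattern sketch (illustrative only): local-disk access is always
 * bracketed by a successful get_ldev() and a matching put_ldev():
 *
 *	if (get_ldev_if_state(device, D_UP_TO_DATE)) {
 *		sector_t cap = drbd_get_capacity(device->ldev->backing_bdev);
 *		... use device->ldev ...
 *		put_ldev(device);
 *	} else {
 *		... diskless code path ...
 *	}
 *
 * Dereferencing device->ldev without holding such a reference races
 * with detach.
 */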

/* this throttles on-the-fly application requests
 * according to max_buffers settings */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten in the switch. */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(device))
		return false;

	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	/* we wait here
	 *    as long as the device is suspended
	 *    until the bitmap is no longer on the fly during connection
	 *    handshake as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}
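
/*
 * Accounting sketch (illustrative only): application bios are counted
 * in on entry and counted out on final completion, so queued bitmap IO
 * can wait for the device to drain:
 *
 *	inc_ap_bio(device);	// may block, see inc_ap_bio_cond()
 *	... submit and complete the request ...
 *	dec_ap_bio(device);	// may kick bm_io_work once the count hits 0
 */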

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
				struct drbd_connection, connections);
}

#endif	/* _DRBD_INT_H */