#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;

#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
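
/*
 * Illustrative sketch (not part of the build): drbd_printk() dispatches on
 * the type of its object argument, so the same call style works for device,
 * resource, connection and peer-device pointers.  Assuming "device" and
 * "connection" are valid pointers:
 *
 *	drbd_warn(device, "disk state is %s\n", drbd_disk_str(device->state.disk));
 *	drbd_info(connection, "retrying connect in %d ms\n", 100);
 *
 * Passing any other pointer type resolves to the undefined
 * drbd_printk_with_wrong_object_type() and fails at link time.
 */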

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
		})
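
/*
 * Illustrative sketch (not part of the build): D_ASSERT() only logs, it
 * never BUGs, and expect() evaluates to the tested condition, so it can
 * guard a branch.  Note that expect() uses a "device" variable from the
 * surrounding scope.  For example:
 *
 *	D_ASSERT(device, list_empty(&device->sync_ee));
 *	if (!expect(size > 0))
 *		return -EINVAL;
 */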

enum {
	DRBD_FAULT_MD_WR = 0,
	DRBD_FAULT_MD_RD = 1,
	DRBD_FAULT_RS_WR = 2,
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,
	DRBD_FAULT_BM_ALLOC = 7,
	DRBD_FAULT_AL_EE = 8,
	DRBD_FAULT_RECEIVE = 9,

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
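
/*
 * Illustrative sketch (not part of the build): with
 * CONFIG_DRBD_FAULT_INJECTION enabled, an I/O path can simulate a media
 * error before submitting.  drbd_enable_faults is a bit mask over the
 * DRBD_FAULT_* types above; drbd_fault_rate is roughly a fault probability
 * in percent:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio_noacct(bio);
 */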

#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

#define div_floor(A, B) ((A)/(B))
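
/*
 * Worked example: div_ceil(7, 4) expands to 7/4 + (7%4 ? 1 : 0) == 2,
 * while div_floor(7, 4) == 1.  Both macros evaluate their arguments more
 * than once, so they must not be given expressions with side effects.
 */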

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;
extern struct list_head drbd_resources;

extern const char *cmdname(enum drbd_packet cmd);

struct bm_xfer_ctx {
	unsigned long bm_bits;
	unsigned long bm_words;

	unsigned long bit_offset;
	unsigned long word_offset;

	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
			       const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
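
/*
 * Worked example (illustrative): with bit_offset == 100 on a 64-bit host,
 * word_offset becomes 100 >> 6 == 1 (the second 64-bit word).  On a 32-bit
 * host it is 100 >> 5 == 3, then rounded down to 2 by the "&= ~(1UL)", so
 * that the offset always addresses a 64-bit aligned chunk of the bitmap
 * regardless of the host word size.
 */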

extern unsigned int drbd_header_size(struct drbd_connection *connection);

enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	struct bio *private_bio;

	struct drbd_interval i;

	unsigned int epoch;

	struct list_head tl_requests;
	struct bio *master_bio;

	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	unsigned long start_jif;

	unsigned long in_actlog_jif;

	unsigned long pre_submit_jif;

	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	atomic_t completion_ref;

	struct kref kref;

	unsigned rq_state;
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;
	atomic_t active;
	unsigned long flags;
};

int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32,
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch;
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;

	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,
	__EE_TRIM,
	__EE_ZEROOUT,
	__EE_RESUBMITTED,
	__EE_WAS_ERROR,
	__EE_HAS_DIGEST,
	__EE_RESTART_REQUESTS,
	__EE_SEND_WRITE_ACK,
	__EE_IN_INTERVAL_TREE,
	__EE_SUBMITTED,
	__EE_WRITE,
	__EE_WRITE_SAME,
	__EE_APPLICATION,
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM			(1<<__EE_TRIM)
#define EE_ZEROOUT		(1<<__EE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)

enum {
	UNPLUG_REMOTE,
	MD_DIRTY,
	USE_DEGR_WFC_T,
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,
	CONSIDER_RESYNC,
	MD_NO_FUA,
	BITMAP_IO,
	BITMAP_IO_QUEUED,
	WAS_IO_ERROR,
	WAS_READ_ERROR,
	FORCE_DETACH,
	RESYNC_AFTER_NEG,
	RESIZE_PENDING,
	NEW_CUR_UUID,
	AL_SUSPENDED,
	AHEAD_TO_SYNC_SOURCE,
	B_RS_H_DONE,
	DISCARD_MY_DATA,
	READ_BALANCE_RR,
	FLUSH_PENDING,
	GOING_DISKLESS,
	GO_DISKLESS,
	DESTROY_DISK,
	MD_SYNC,
	RS_START,
	RS_PROGRESS,
	RS_DONE,
};

struct drbd_bitmap;

enum bm_flag {
	BM_LOCKED_MASK = 0xf,

	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	BM_IS_LOCKED  = 0x8,

	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;

	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;

	u64 la_size_sect;
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;
	s32 bm_offset;

	s32 meta_dev_idx;

	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k;
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf;
	sector_t known_size;
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;
	unsigned long submit_jif;
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total;
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);

enum {
	NET_CONGESTED,
	RESOLVE_CONFLICTS,
	SEND_PING,
	GOT_PING_ACK,
	CONN_WD_ST_CHG_REQ,
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,
	CREATE_BARRIER,
	STATE_SENT,
	CALLBACK_PENDING,
	DISCONNECT_SENT,
	DEVICE_WORK_PENDING,
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;
	struct mutex adm_mutex;
	spinlock_t req_lock;

	unsigned susp:1;
	unsigned susp_nod:1;
	unsigned susp_fen:1;

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details {
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;
	enum drbd_conns cstate;
	struct mutex cstate_mutex;
	unsigned int connect_cnt;

	unsigned long flags;
	struct net_conf *net_conf;
	wait_queue_head_t ping_wait;

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;
	struct drbd_socket meta;
	int agreed_pro_version;
	u32 agreed_features;
	unsigned long last_received;
	unsigned int ko_count;

	struct list_head transfer_log;

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;
	struct crypto_shash *peer_integrity_tfm;
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;
	unsigned current_tle_writes;

	unsigned long last_reconnect_jif;

	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr;
	unsigned int r_cb_nr;
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		bool seen_any_write_yet;

		int current_epoch_nr;

		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__, __LINE__)
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__, __LINE__)

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;
	unsigned int minor;

	struct kref kref;

	unsigned long flags;

	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;
	struct request_queue *rq_queue;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;
	atomic_t ap_actlog_cnt;
	atomic_t ap_pending_cnt;
	atomic_t rs_pending_cnt;
	atomic_t unacked_cnt;
	atomic_t local_cnt;
	atomic_t suspend_cnt;

	struct rb_root read_requests;
	struct rb_root write_requests;

	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	bool use_csums;

	unsigned long rs_total;
	unsigned long rs_failed;
	unsigned long rs_start;
	unsigned long rs_paused;
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS	8
#define DRBD_SYNC_MARK_STEP	(3*HZ)
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	int rs_last_mark;
	unsigned long rs_last_bcast;

	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	sector_t ov_position;
	sector_t ov_last_oos_start;
	sector_t ov_last_oos_size;
	unsigned long ov_left;

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo;

	struct lru_cache *resync;
	unsigned int resync_locked;
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;
	struct list_head sync_ee;
	struct list_head done_ee;
	struct list_head read_ee;
	struct list_head net_ee;

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;
	atomic_t pp_in_use_by_net;
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set;
	struct bm_io_work bm_io_work;
	u64 ed_uuid;
	struct mutex own_state_mutex;
	struct mutex *state_mutex;
	char congestion_reason;
	atomic_t rs_sect_in;
	atomic_t rs_sect_ev;
	int rs_last_sect_ev;
	int rs_last_events;

	int c_sync_rate;
	struct fifo_buffer *rs_plan_s;
	int rs_in_flight;
	atomic_t ap_in_flight;
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list;
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	unsigned int minor;

	unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)

	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	struct sk_buff *reply_skb;

	struct drbd_genlmsghdr *reply_dh;

	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
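
/*
 * Illustrative sketch (not part of the build): the _rcu variants must run
 * under rcu_read_lock(); the plain variants assume the list is otherwise
 * stabilized (for example by holding conf_update).  Assuming "device" is a
 * valid pointer:
 *
 *	struct drbd_peer_device *peer_device;
 *
 *	rcu_read_lock();
 *	for_each_peer_device_rcu(peer_device, device)
 *		drbd_info(peer_device, "found a peer device\n");
 *	rcu_read_unlock();
 */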

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2,
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

#define MD_128MB_SECT (128LLU << 11)
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
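
/*
 * Worked arithmetic: AL_EXTENT_SIZE is 1 << 22 bytes == 4 MiB, so one
 * activity-log extent covers 8192 sectors of 512 bytes.
 */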

#define AL_UPDATES_PER_TRANSACTION	 64
#define AL_CONTEXT_PER_TRANSACTION	919

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

struct bm_extent {
	int rs_left;
	int rs_failed;
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES	0
#define BME_LOCKED	1
#define BME_PRIORITY	2

#define SLEEP_TIME (HZ/10)

#define BM_BLOCK_SHIFT	12
#define BM_BLOCK_SIZE	(1<<BM_BLOCK_SHIFT)

#define BM_EXT_SHIFT	24
#define BM_EXT_SIZE	(1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))

#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)

#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)

#define AL_EXT_PER_BM_SECT	(1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
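
/*
 * Worked example (illustrative): one bitmap bit covers BM_BLOCK_SIZE ==
 * 4 KiB == 8 sectors, one bitmap extent covers BM_EXT_SIZE == 16 MiB.
 * For sector 123456:
 *
 *	BM_SECT_TO_BIT(123456) == 123456 >> 3 == 15432
 *	BM_SECT_TO_EXT(123456) == 123456 >> 15 == 3
 *	BM_BIT_TO_SECT(15432)  == 15432 << 3 == 123456
 *
 * and AL_EXT_PER_BM_SECT == 1 << (24 - 22) == 4 activity-log extents per
 * bitmap extent.
 */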

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

#define DRBD_MAX_SECTORS_FIXED_BM \
	((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM

#if BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
#endif

#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17)

#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)

extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);

extern int drbd_bm_set_bits(struct drbd_device *device,
		unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(struct drbd_device *device,
		unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);

extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);

extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);

extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;
extern struct kmem_cache *drbd_bm_ext_cache;
extern struct kmem_cache *drbd_al_ext_cache;
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;

extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

#define DRBD_MIN_POOL_PAGES	128
extern mempool_t drbd_md_io_page_pool;

extern struct bio_set drbd_md_io_bio_set;

extern struct bio_set drbd_io_bio_set;

extern struct mutex resources_mutex;

extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

extern struct mutex notification_mutex;

extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
			struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);

extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)device->ov_last_oos_start,
			(unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *, void *);

extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);

extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
		sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
				    struct drbd_peer_request *, const unsigned,
				    const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
						     sector_t, unsigned int,
						     unsigned int,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

void drbd_set_my_capacity(struct drbd_device *device, sector_t size);

static inline void drbd_submit_bio_noacct(struct drbd_device *device,
					  int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_bdev) {
		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		submit_bio_noacct(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo);

extern struct proc_dir_entry *drbd_proc;
int drbd_seq_show(struct seq_file *seq, void *v);

extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);
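
/*
 * Illustrative sketch (not part of the build): all three resync-bitmap
 * updates funnel through __drbd_change_sync(); the wrappers above differ
 * only in the mode argument.  Marking 4 KiB starting at sector "s" as out
 * of sync:
 *
 *	drbd_set_out_of_sync(device, s, 4096);
 *
 * The size argument is in bytes, the sector argument in 512-byte sectors.
 */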

struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern void notify_resource_state(struct sk_buff *,
				  unsigned int,
				  struct drbd_resource *,
				  struct resource_info *,
				  enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
				unsigned int,
				struct drbd_device *,
				struct device_info *,
				enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
				    unsigned int,
				    struct drbd_connection *,
				    struct connection_info *,
				    enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
				     unsigned int,
				     struct drbd_peer_device *,
				     struct peer_device_info *,
				     enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
			  struct drbd_connection *, const char *, int);

static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON:
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		fallthrough;
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}

static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}
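
/*
 * Illustrative sketch (not part of the build): a work item is queued by
 * pointing its callback at a handler and appending it to a queue; a worker
 * thread later invokes cb (with cancel set when the work is being flushed).
 * "w_my_handler" is a hypothetical name for illustration only:
 *
 *	static int w_my_handler(struct drbd_work *w, int cancel)
 *	{
 *		return 0;	// 0 for success, by the w_*() convention
 *	}
 *	...
 *	device->resync_work.cb = w_my_handler;
 *	drbd_queue_work(&connection->sender_work, &device->resync_work);
 */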

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);

static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		send_sig(SIGXCPU, task, 1);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_target_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_TARGET ||
	       connection_state == C_PAUSED_SYNC_T;
}

static inline bool is_sync_source_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_SOURCE ||
	       connection_state == C_PAUSED_SYNC_S;
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return is_sync_source_state(connection_state) ||
	       is_sync_target_state(connection_state);
}

#define get_ldev_if_state(_device, _min_state)				\
	(_get_ldev_if_state((_device), (_min_state)) ?			\
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	int i = atomic_dec_return(&device->local_cnt);

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}
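
/*
 * Illustrative sketch (not part of the build): the canonical pattern for
 * accessing the backing device is to take a reference conditioned on a
 * minimal disk state and to drop it on every exit path:
 *
 *	if (get_ldev_if_state(device, D_UP_TO_DATE)) {
 *		sector_t cap = drbd_get_capacity(device->ldev->backing_bdev);
 *		// ... use cap and device->ldev ...
 *		put_ldev(device);
 *	}
 *
 * Without the reference, device->ldev may be freed concurrently by a
 * detach.
 */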

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	switch ((enum drbd_conns)s.conn) {
	case C_STANDALONE:
	case C_WF_CONNECTION:
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		break;

	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	if (!drbd_state_is_stable(device))
		return false;

	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
				struct drbd_connection, connections);
}

#endif