/*
 * drbd_int.h
 *
 * Shared internal declarations for the DRBD (Distributed Replicated
 * Block Device) driver: logging helpers, core object types, state and
 * flag bits, on-disk layout constants, and function prototypes.
 */
#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

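/*
 * Annotations for the sparse static checker: mark members that must only
 * be accessed with a given lock held.  They expand to nothing in a
 * regular build.
 */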
#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;

#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

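/*
 * drbd_printk() dispatches on the static type of @obj at compile time:
 * each __drbd_printk_if_same_type() expands to a (condition, expression)
 * pair for __builtin_choose_expr(), and passing anything other than a
 * device, resource, connection or peer_device pointer fails to link
 * against the intentionally undefined drbd_printk_with_wrong_object_type().
 *
 * Illustrative usage:
 *	drbd_warn(device, "out of sync: %llu sectors\n", (unsigned long long)n);
 *	drbd_info(connection, "handshake complete\n");
 */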
#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

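/*
 * expect(): like D_ASSERT(), but evaluates to the tested condition so it
 * can be used inline, e.g. "if (!expect(size <= limit)) return;".
 * Note that it implicitly uses a variable named "device" from the
 * caller's scope.
 */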
#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			drbd_err(device, "ASSERTION %s FAILED in %s\n",		\
				#exp, __func__);				\
		_bool;								\
	})

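/*
 * Fault insertion points, used as bit indices in drbd_enable_faults:
 * meta-data write/read, resync write/read, data write/read/read-ahead,
 * bitmap allocation, peer-request (ee) allocation, and the receive path.
 */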
enum {
	DRBD_FAULT_MD_WR = 0,
	DRBD_FAULT_MD_RD = 1,
	DRBD_FAULT_RS_WR = 2,
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,
	DRBD_FAULT_BM_ALLOC = 7,
	DRBD_FAULT_AL_EE = 8,
	DRBD_FAULT_RECEIVE = 9,

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}

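/* integer division, round up (div_ceil) or down (div_floor) */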
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;
extern struct list_head drbd_resources;

extern const char *cmdname(enum drbd_packet cmd);

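/*
 * Progress context for a bitmap exchange with the peer: total size,
 * current position in bits and words, and packet/byte statistics
 * (the two-element arrays apparently split plain from compressed
 * bitmap packets).
 */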
struct bm_xfer_ctx {
	unsigned long bm_bits;
	unsigned long bm_words;

	unsigned long bit_offset;
	unsigned long word_offset;

	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
			       const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
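	/*
	 * Convert the current bit offset into a word offset, in host
	 * "long" units.  The bitmap is exchanged in 64-bit chunks, so on
	 * 32-bit hosts the word offset is rounded down to an even long
	 * to stay 64-bit aligned.
	 */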
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

extern unsigned int drbd_header_size(struct drbd_connection *connection);

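/*
 * DRBD's long-running kernel threads (receiver, worker, ack_receiver)
 * are wrapped in struct drbd_thread; t_lock protects task and t_state.
 */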
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function)(struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
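	/*
	 * t_state is read without taking t_lock here; the read barrier
	 * pairs with the locked updates in the thread start/stop paths.
	 */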
	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

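/*
 * A struct drbd_request tracks one application (master) bio through its
 * life cycle: the private_bio submitted to the local backing device, the
 * interval it occupies for conflict detection, its position in the
 * transfer log (tl_requests), and jiffies timestamps for each stage.
 */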
struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	struct bio *private_bio;

	struct drbd_interval i;

	unsigned int epoch;

	struct list_head tl_requests;
	struct bio *master_bio;

	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	unsigned long start_jif;
	unsigned long in_actlog_jif;
	unsigned long pre_submit_jif;
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	atomic_t completion_ref;

	struct kref kref;

	unsigned rq_state;
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;
	atomic_t active;
	unsigned long flags;
};

int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32,
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch;
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;

	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

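/*
 * Peer-request (ee) flag bits, kept in drbd_peer_request->flags while
 * the corresponding bios are in flight or the request sits on one of
 * the device's *_ee lists.
 */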
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,
	__EE_TRIM,
	__EE_ZEROOUT,
	__EE_RESUBMITTED,
	__EE_WAS_ERROR,
	__EE_HAS_DIGEST,
	__EE_RESTART_REQUESTS,
	__EE_SEND_WRITE_ACK,
	__EE_IN_INTERVAL_TREE,
	__EE_SUBMITTED,
	__EE_WRITE,
	__EE_WRITE_SAME,
	__EE_APPLICATION,
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM			(1<<__EE_TRIM)
#define EE_ZEROOUT		(1<<__EE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)

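/* global flag bits in drbd_device->flags */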
enum {
	UNPLUG_REMOTE,
	MD_DIRTY,
	USE_DEGR_WFC_T,
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,
	CONSIDER_RESYNC,
	MD_NO_FUA,
	BITMAP_IO,
	BITMAP_IO_QUEUED,
	WAS_IO_ERROR,
	WAS_READ_ERROR,
	FORCE_DETACH,
	RESYNC_AFTER_NEG,
	RESIZE_PENDING,
	NEW_CUR_UUID,
	AL_SUSPENDED,
	AHEAD_TO_SYNC_SOURCE,
	B_RS_H_DONE,
	DISCARD_MY_DATA,
	READ_BALANCE_RR,
	FLUSH_PENDING,
	GOING_DISKLESS,
	GO_DISKLESS,
	DESTROY_DISK,
	MD_SYNC,
	RS_START,
	RS_PROGRESS,
	RS_DONE,
};

struct drbd_bitmap;

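/*
 * Bitmap locking: while a whole-bitmap operation runs, these flags
 * describe which concurrent bit operations are still allowed; the
 * BM_LOCKED_*_ALLOWED combinations below are the commonly used sets.
 */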
enum bm_flag {
	BM_LOCKED_MASK = 0xf,

	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	BM_IS_LOCKED = 0x8,

	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;

	void *sbuf;
	void *rbuf;
};

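/*
 * On-disk meta data, cached in core: offsets are in units of 512-byte
 * sectors; al_offset/bm_offset are signed and relative to md_offset,
 * since with internal meta data the bitmap precedes the superblock.
 */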
struct drbd_md {
	u64 md_offset;

	u64 la_size_sect;
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;
	s32 bm_offset;

	s32 meta_dev_idx;

	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k;
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf;
	sector_t known_size;
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;
	unsigned long submit_jif;
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total;
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);

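/* flag bits in drbd_connection->flags */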
enum {
	NET_CONGESTED,
	RESOLVE_CONFLICTS,
	SEND_PING,
	GOT_PING_ACK,
	CONN_WD_ST_CHG_REQ,
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,
	CREATE_BARRIER,
	STATE_SENT,
	CALLBACK_PENDING,
	DISCONNECT_SENT,
	DEVICE_WORK_PENDING,
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;
	struct mutex adm_mutex;
	spinlock_t req_lock;

	unsigned susp:1;
	unsigned susp_nod:1;
	unsigned susp_fen:1;

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;
	enum drbd_conns cstate;
	struct mutex cstate_mutex;
	unsigned int connect_cnt;

	unsigned long flags;
	struct net_conf *net_conf;
	wait_queue_head_t ping_wait;

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;
	struct drbd_socket meta;
	int agreed_pro_version;
	u32 agreed_features;
	unsigned long last_received;
	unsigned int ko_count;

	struct list_head transfer_log;

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;
	struct crypto_shash *peer_integrity_tfm;
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;
	unsigned current_tle_writes;

	unsigned long last_reconnect_jif;

	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr;
	unsigned int r_cb_nr;
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;
		bool seen_any_write_yet;
		int current_epoch_nr;
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__, __LINE__)
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__, __LINE__)

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;
	unsigned int minor;

	struct kref kref;

	unsigned long flags;

	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;
	atomic_t ap_actlog_cnt;
	atomic_t ap_pending_cnt;
	atomic_t rs_pending_cnt;
	atomic_t unacked_cnt;
	atomic_t local_cnt;
	atomic_t suspend_cnt;

	struct rb_root read_requests;
	struct rb_root write_requests;

	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	bool use_csums;
	unsigned long rs_total;
	unsigned long rs_failed;
	unsigned long rs_start;
	unsigned long rs_paused;
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	int rs_last_mark;
	unsigned long rs_last_bcast;

	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	sector_t ov_position;
	sector_t ov_last_oos_start;
	sector_t ov_last_oos_size;
	unsigned long ov_left;

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo;

	struct lru_cache *resync;
	unsigned int resync_locked;
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;
	struct list_head sync_ee;
	struct list_head done_ee;
	struct list_head read_ee;
	struct list_head net_ee;

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;
	atomic_t pp_in_use_by_net;
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set;
	struct bm_io_work bm_io_work;
	u64 ed_uuid;
	struct mutex own_state_mutex;
	struct mutex *state_mutex;
	char congestion_reason;
	atomic_t rs_sect_in;
	atomic_t rs_sect_ev;
	int rs_last_sect_ev;
	int rs_last_events;

	int c_sync_rate;
	struct fifo_buffer *rs_plan_s;
	int rs_in_flight;
	atomic_t ap_in_flight;
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list;
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	unsigned int minor;

	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)

	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	struct sk_buff *reply_skb;

	struct drbd_genlmsghdr *reply_dh;

	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

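/*
 * Illustrative use of the iterators above (the _rcu variants must run
 * under rcu_read_lock()):
 *
 *	for_each_resource_rcu(resource, &drbd_resources)
 *		for_each_connection_rcu(connection, resource)
 *			drbd_info(connection, "configured\n");
 */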
static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2,
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
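/*
 * Meta data layout constants, in units of 512-byte sectors unless the
 * name says otherwise: a "fixed size" external meta data slot is 128MB,
 * the superblock is 4kB, and the activity log area is 32kB.
 */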
#define MD_128MB_SECT (128LLU << 11)
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

#define AL_UPDATES_PER_TRANSACTION 64
#define AL_CONTEXT_PER_TRANSACTION 919

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

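/*
 * One in-memory element of the resync LRU: tracks how many blocks of a
 * bitmap extent are still out of sync (rs_left) or failed (rs_failed).
 */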
struct bm_extent {
	int rs_left;
	int rs_failed;
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES	0
#define BME_LOCKED	1
#define BME_PRIORITY	2

#define SLEEP_TIME (HZ/10)

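/*
 * Bitmap granularity: one bit covers a 4 KiB block (BM_BLOCK_SHIFT = 12),
 * one bitmap extent covers 16 MiB (BM_EXT_SHIFT = 24).  The conversion
 * macros below follow from those two shifts and the 512-byte sector:
 * e.g. BM_SECT_TO_BIT(x) = x >> 3 (8 sectors per bit) and
 * BM_BIT_TO_EXT(x) = x >> 12 (4096 bits per extent).
 */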
#define BM_BLOCK_SHIFT	12
#define BM_BLOCK_SIZE	(1<<BM_BLOCK_SHIFT)

#define BM_EXT_SHIFT	24
#define BM_EXT_SIZE	(1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

#define BM_SECT_TO_BIT(x)	((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)	((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT		BM_BIT_TO_SECT(1)

#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

#define BM_SECT_TO_EXT(x)	((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)	((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_EXT_TO_SECT(x)	((sector_t)(x) << (BM_EXT_SHIFT-9))

#define BM_SECT_PER_EXT		BM_EXT_TO_SECT(1)

#define BM_BITS_PER_EXT		(1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK	(BM_BITS_PER_EXT - 1)

#define AL_EXT_PER_BM_SECT	(1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)

#define DRBD_MAX_SECTORS_FIXED_BM \
	((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#define DRBD_MAX_SECTORS	DRBD_MAX_SECTORS_FIXED_BM

#if BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
#endif

#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17)

#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)

extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);

extern int drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);

extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);

extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);

extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;
extern struct kmem_cache *drbd_bm_ext_cache;
extern struct kmem_cache *drbd_al_ext_cache;
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;

extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

#define DRBD_MIN_POOL_PAGES	128
extern mempool_t drbd_md_io_page_pool;

extern struct bio_set drbd_md_io_bio_set;

extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern struct bio_set drbd_io_bio_set;

extern struct mutex resources_mutex;

extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
						 void *peer_addr, int peer_addr_len);
extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);

extern struct mutex notification_mutex;

extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
	DS_ERROR_SHRINK = -3,
	DS_ERROR_SPACE_MD = -2,
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
			struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);

extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)device->ov_last_oos_start,
			(unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}

extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
			 void *);

extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);

extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
		sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
				    struct drbd_peer_request *, const unsigned,
				    const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
						     sector_t, unsigned int,
						     unsigned int,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

void drbd_set_my_capacity(struct drbd_device *device, sector_t size);

static inline void drbd_submit_bio_noacct(struct drbd_device *device,
					  int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_disk) {
		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		submit_bio_noacct(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo);

extern struct proc_dir_entry *drbd_proc;
int drbd_seq_show(struct seq_file *seq, void *v);

extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);

struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern void notify_resource_state(struct sk_buff *,
				  unsigned int,
				  struct drbd_resource *,
				  struct resource_info *,
				  enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
				unsigned int,
				struct drbd_device *,
				struct device_info *,
				enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
				    unsigned int,
				    struct drbd_connection *,
				    struct connection_info *,
				    enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
				     unsigned int,
				     struct drbd_peer_device *,
				     struct peer_device_info *,
				     enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
			  struct drbd_connection *, const char *, int);

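/*
 * Peer-request pages are kept as a singly linked chain, using the
 * page's ->private field as the "next" pointer.
 */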
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON:
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		fallthrough;
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

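/*
 * drbd_chk_io_error() - apply the configured on_io_error policy after a
 * failed IO; meant to be called from IO completion handlers.  Takes and
 * releases req_lock around __drbd_chk_io_error_().
 */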
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}

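/*
 * drbd_md_first_sector() - first sector of the meta data area
 *
 * With internal meta data this is the start of the bitmap (bm_offset is
 * negative relative to md_offset); otherwise it is the start of the
 * meta data area itself.
 */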
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

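/*
 * drbd_get_max_capacity() - usable capacity of the backing device, in
 * sectors.  The result is capped by the meta data location (for internal
 * meta data), by what the bitmap can track, and by DRBD_MAX_SECTORS*.
 */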
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

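/* drbd_md_ss() - sector number of the meta data superblock */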
static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);

static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		send_sig(SIGXCPU, task, 1);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

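/*
 * Bookkeeping counters on drbd_device: ap_pending_cnt counts application
 * requests sent to the peer but not yet acknowledged, rs_pending_cnt
 * counts outstanding resync-related requests, and unacked_cnt counts
 * peer requests we still owe an ack for.  The _dec_* helpers complain
 * if a counter would drop below zero.
 */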
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_target_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_TARGET ||
	       connection_state == C_PAUSED_SYNC_T;
}

static inline bool is_sync_source_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_SOURCE ||
	       connection_state == C_PAUSED_SYNC_S;
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return is_sync_source_state(connection_state) ||
	       is_sync_target_state(connection_state);
}

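/*
 * get_ldev(_if_state) grabs a reference on device->ldev (local_cnt) if
 * the disk state is at least the requested one; put_ldev() drops it.
 * Do not dereference device->ldev without holding such a reference.
 *
 * Typical pattern (illustrative):
 *	if (get_ldev(device)) {
 *		... use device->ldev ...
 *		put_ldev(device);
 *	}
 */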
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;

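	/*
	 * disk_state was sampled above, before the atomic_dec_return()
	 * below: once local_cnt hits zero, cleanup work queued from here
	 * may change the device state under us.
	 */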
	int i = atomic_dec_return(&device->local_cnt);

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED) {
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		}
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	return mxb;
}

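/*
 * "Stable" here means: new application IO may be accepted in this
 * connection/disk state; transitional states return 0.
 */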
static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	switch ((enum drbd_conns)s.conn) {
	case C_STANDALONE:
	case C_WF_CONNECTION:
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		break;

	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	if (!drbd_state_is_stable(device))
		return false;

	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
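	/*
	 * Block until new application IO may be started: not while the
	 * device is suspended, not while bitmap IO is pending, and not
	 * beyond the max_buffers limit.  The check-and-increment happens
	 * under req_lock (in inc_ap_bio_cond()) to avoid races.
	 */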
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
					struct drbd_connection, connections);
}

#endif