14#ifndef _DRBD_INT_H
15#define _DRBD_INT_H
16
17#include <crypto/hash.h>
18#include <linux/compiler.h>
19#include <linux/types.h>
20#include <linux/list.h>
21#include <linux/sched/signal.h>
22#include <linux/bitops.h>
23#include <linux/slab.h>
24#include <linux/ratelimit.h>
25#include <linux/tcp.h>
26#include <linux/mutex.h>
27#include <linux/major.h>
28#include <linux/blkdev.h>
29#include <linux/backing-dev.h>
30#include <linux/idr.h>
31#include <linux/dynamic_debug.h>
32#include <net/tcp.h>
33#include <linux/lru_cache.h>
34#include <linux/prefetch.h>
35#include <linux/drbd_genl_api.h>
36#include <linux/drbd.h>
37#include "drbd_strings.h"
38#include "drbd_state.h"
39#include "drbd_protocol.h"
40
41#ifdef __CHECKER__
42# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
43# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
44# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
45#else
46# define __protected_by(x)
47# define __protected_read_by(x)
48# define __protected_write_by(x)
49#endif
50
51
/* shared module parameters */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;
60
61
62
63
64
65
66
#define DRBD_SIGKILL SIGHUP	/* used to interrupt/stop our kernel threads */
68
/* magic block_id values used on the wire */
#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)
72
73#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
74
75struct drbd_device;
76struct drbd_connection;
77
78#define __drbd_printk_device(level, device, fmt, args...) \
79 dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
80#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
81 dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
82#define __drbd_printk_resource(level, resource, fmt, args...) \
83 printk(level "drbd %s: " fmt, (resource)->name, ## args)
84#define __drbd_printk_connection(level, connection, fmt, args...) \
85 printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
86
87void drbd_printk_with_wrong_object_type(void);
88
89#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
90 (__builtin_types_compatible_p(typeof(obj), type) || \
91 __builtin_types_compatible_p(typeof(obj), const type)), \
92 func(level, (const type)(obj), fmt, ## args)
93
94#define drbd_printk(level, obj, fmt, args...) \
95 __builtin_choose_expr( \
96 __drbd_printk_if_same_type(obj, struct drbd_device *, \
97 __drbd_printk_device, level, fmt, ## args), \
98 __builtin_choose_expr( \
99 __drbd_printk_if_same_type(obj, struct drbd_resource *, \
100 __drbd_printk_resource, level, fmt, ## args), \
101 __builtin_choose_expr( \
102 __drbd_printk_if_same_type(obj, struct drbd_connection *, \
103 __drbd_printk_connection, level, fmt, ## args), \
104 __builtin_choose_expr( \
105 __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
106 __drbd_printk_peer_device, level, fmt, ## args), \
107 drbd_printk_with_wrong_object_type()))))
108
#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
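
/*
 * Illustrative usage (not part of the interface): drbd_printk() and the
 * level-specific wrappers above accept a device, resource, connection or
 * peer_device as their first argument, e.g.
 *
 *	drbd_info(device, "attached to backing device\n");
 *	drbd_warn(connection, "peer not reachable, ko_count=%u\n", ko_count);
 */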
124
125#define D_ASSERT(device, exp) do { \
126 if (!(exp)) \
127 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
128 } while (0)
129
130
131
132
133
134
/* NOTE: relies on a variable named "device" being in scope at the call site */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
		})
142
143
/* fault insertion types, used with drbd_insert_fault() below */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/* meta data read */
	DRBD_FAULT_RS_WR = 2,	/* resync write */
	DRBD_FAULT_RS_RD = 3,	/* resync read */
	DRBD_FAULT_DT_WR = 4,	/* data write */
	DRBD_FAULT_DT_RD = 5,	/* data read */
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* allocation of peer requests */
	DRBD_FAULT_RECEIVE = 9,	/* corrupt incoming data payload */

	DRBD_FAULT_MAX,
};
158
159extern unsigned int
160_drbd_insert_fault(struct drbd_device *device, unsigned int type);
161
162static inline int
163drbd_insert_fault(struct drbd_device *device, unsigned int type) {
164#ifdef CONFIG_DRBD_FAULT_INJECTION
165 return drbd_fault_rate &&
166 (drbd_enable_faults & (1<<type)) &&
167 _drbd_insert_fault(device, type);
168#else
169 return 0;
170#endif
171}
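
/*
 * Illustrative usage (assuming CONFIG_DRBD_FAULT_INJECTION=y): submission
 * paths typically fail the bio instead of submitting it when a fault is
 * injected, as in
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio_noacct(bio);
 */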
172
173
/* integer division helpers: round up vs. round down */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

#define div_floor(A, B) ((A)/(B))
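/* e.g. div_ceil(7, 4) == 2 while div_floor(7, 4) == 1 (illustrative) */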
177
178extern struct ratelimit_state drbd_ratelimit_state;
179extern struct idr drbd_devices;
180extern struct list_head drbd_resources;
181
182extern const char *cmdname(enum drbd_packet cmd);
183
184
185
/* context for the bitmap transfer to/from the peer */
struct bm_xfer_ctx {
	/* "const": total bits and words of the bitmap,
	 * cached so we do not call the accessors over and over */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* current position within the bitmap during the transfer */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index 0/1 distinguishes plain and compressed packets */
	unsigned packets[2];
	unsigned bytes[2];
};
201
202extern void INFO_bm_xfer_stats(struct drbd_device *device,
203 const char *direction, struct bm_xfer_ctx *c);
204
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit), but is kept
	 * aligned to 64 bit boundaries, so an encoded (compressed) packet
	 * ending at an odd bit offset does not complicate a following
	 * plain-text bitmap packet. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
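
/*
 * Illustrative: with BITS_PER_LONG == 32, a bit_offset of 96 gives a raw
 * word_offset of 3, which is rounded down to 2 to keep it 64-bit aligned.
 */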
223
224extern unsigned int drbd_header_size(struct drbd_connection *connection);
225
226
227enum drbd_thread_state {
228 NONE,
229 RUNNING,
230 EXITING,
231 RESTARTING
232};
233
234struct drbd_thread {
235 spinlock_t t_lock;
236 struct task_struct *task;
237 struct completion stop;
238 enum drbd_thread_state t_state;
239 int (*function) (struct drbd_thread *);
240 struct drbd_resource *resource;
241 struct drbd_connection *connection;
242 int reset_cpu_mask;
243 const char *name;
244};
245
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* reading t_state without taking t_lock is fine here; the barrier
	 * pairs with the writers in the thread start/stop paths */
	smp_rmb();
	return thi->t_state;
}
255
256struct drbd_work {
257 struct list_head list;
258 int (*cb)(struct drbd_work *, int cancel);
259};
260
261struct drbd_device_work {
262 struct drbd_work w;
263 struct drbd_device *device;
264};
265
266#include "drbd_interval.h"
267
268extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
269
270extern void lock_all_resources(void);
271extern void unlock_all_resources(void);
272
273struct drbd_request {
274 struct drbd_work w;
275 struct drbd_device *device;
276
277
278
279
280
	/* local clone of master_bio, submitted to our backing device */
	struct bio *private_bio;

	/* sector and size of this request; also links it into the
	 * read_requests/write_requests interval trees of the device */
	struct drbd_interval i;

	/* transfer log epoch ("barrier" number) this request belongs to */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* the original bio from upper layers */
297
298
299 struct list_head req_pending_master_completion;
300 struct list_head req_pending_local;
301
302
303 unsigned long start_jif;
304
305
306
307
308
309
310
311
312
313 unsigned long in_actlog_jif;
314
315
316 unsigned long pre_submit_jif;
317
318
319 unsigned long pre_send_jif;
320 unsigned long acked_jif;
321 unsigned long net_done_jif;
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356 atomic_t completion_ref;
357
358 struct kref kref;
359
360 unsigned rq_state;
361};
362
363struct drbd_epoch {
364 struct drbd_connection *connection;
365 struct list_head list;
366 unsigned int barrier_nr;
367 atomic_t epoch_size;
368 atomic_t active;
369 unsigned long flags;
370};
371
372
373int drbdd_init(struct drbd_thread *);
374int drbd_asender(struct drbd_thread *);
375
376
377enum {
378 DE_HAVE_BARRIER_NUMBER,
379};
380
381enum epoch_event {
382 EV_PUT,
383 EV_GOT_BARRIER_NR,
384 EV_BECAME_LAST,
385 EV_CLEANUP = 32,
386};
387
388struct digest_info {
389 int digest_size;
390 void *digest;
391};
392
393struct drbd_peer_request {
394 struct drbd_work w;
395 struct drbd_peer_device *peer_device;
396 struct drbd_epoch *epoch;
397 struct page *pages;
398 atomic_t pending_bios;
399 struct drbd_interval i;
400
401 unsigned long flags;
402 unsigned long submit_jif;
403 union {
404 u64 block_id;
405 struct digest_info *digest;
406 };
407};
408
409
410
411
412
413
414
415enum {
416 __EE_CALL_AL_COMPLETE_IO,
417 __EE_MAY_SET_IN_SYNC,
418
419
420 __EE_TRIM,
421
422
423
424 __EE_ZEROOUT,
425
426
427
428 __EE_RESUBMITTED,
429
430
431
432
433 __EE_WAS_ERROR,
434
435
436 __EE_HAS_DIGEST,
437
438
439 __EE_RESTART_REQUESTS,
440
441
442 __EE_SEND_WRITE_ACK,
443
444
445 __EE_IN_INTERVAL_TREE,
446
447
448
449 __EE_SUBMITTED,
450
451
452 __EE_WRITE,
453
454
455 __EE_WRITE_SAME,
456
457
458
459 __EE_APPLICATION,
460
461
462 __EE_RS_THIN_REQ,
463};
464#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
465#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
466#define EE_TRIM (1<<__EE_TRIM)
467#define EE_ZEROOUT (1<<__EE_ZEROOUT)
468#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
469#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
470#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
471#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
472#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
473#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
474#define EE_SUBMITTED (1<<__EE_SUBMITTED)
475#define EE_WRITE (1<<__EE_WRITE)
476#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
477#define EE_APPLICATION (1<<__EE_APPLICATION)
478#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
479
480
481enum {
482 UNPLUG_REMOTE,
483 MD_DIRTY,
484 USE_DEGR_WFC_T,
485 CL_ST_CHG_SUCCESS,
486 CL_ST_CHG_FAIL,
487 CRASHED_PRIMARY,
488
489
490 CONSIDER_RESYNC,
491
492 MD_NO_FUA,
493
494 BITMAP_IO,
495
496 BITMAP_IO_QUEUED,
497 WAS_IO_ERROR,
498 WAS_READ_ERROR,
499 FORCE_DETACH,
500 RESYNC_AFTER_NEG,
501 RESIZE_PENDING,
502
503 NEW_CUR_UUID,
504 AL_SUSPENDED,
505 AHEAD_TO_SYNC_SOURCE,
506 B_RS_H_DONE,
507 DISCARD_MY_DATA,
508 READ_BALANCE_RR,
509
510 FLUSH_PENDING,
511
512
513
514 GOING_DISKLESS,
515
516
517 GO_DISKLESS,
518 DESTROY_DISK,
519 MD_SYNC,
520 RS_START,
521 RS_PROGRESS,
522 RS_DONE,
523};
524
525struct drbd_bitmap;
526
527
528
529enum bm_flag {
530
531 BM_LOCKED_MASK = 0xf,
532
533
534 BM_DONT_CLEAR = 0x1,
535 BM_DONT_SET = 0x2,
536 BM_DONT_TEST = 0x4,
537
538
539
540 BM_IS_LOCKED = 0x8,
541
542
543 BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
544
545
546
547
548 BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
549
550
551
552 BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
553};
554
555struct drbd_work_queue {
556 struct list_head q;
557 spinlock_t q_lock;
558 wait_queue_head_t q_wait;
559};
560
561struct drbd_socket {
562 struct mutex mutex;
563 struct socket *socket;
564
565
566 void *sbuf;
567 void *rbuf;
568};
569
570struct drbd_md {
571 u64 md_offset;
572
573 u64 la_size_sect;
574 spinlock_t uuid_lock;
575 u64 uuid[UI_SIZE];
576 u64 device_uuid;
577 u32 flags;
578 u32 md_size_sect;
579
580 s32 al_offset;
581 s32 bm_offset;
582
583
584 s32 meta_dev_idx;
585
586
587 u32 al_stripes;
588 u32 al_stripe_size_4k;
589 u32 al_size_4k;
590};
591
592struct drbd_backing_dev {
593 struct block_device *backing_bdev;
594 struct block_device *md_bdev;
595 struct drbd_md md;
596 struct disk_conf *disk_conf;
597 sector_t known_size;
598};
599
600struct drbd_md_io {
601 struct page *page;
602 unsigned long start_jif;
603 unsigned long submit_jif;
604 const char *current_use;
605 atomic_t in_use;
606 unsigned int done;
607 int error;
608};
609
610struct bm_io_work {
611 struct drbd_work w;
612 char *why;
613 enum bm_flag flags;
614 int (*io_fn)(struct drbd_device *device);
615 void (*done)(struct drbd_device *device, int rv);
616};
617
618struct fifo_buffer {
619 unsigned int head_index;
620 unsigned int size;
621 int total;
622 int values[];
623};
624extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
625
626
627enum {
628 NET_CONGESTED,
629 RESOLVE_CONFLICTS,
630 SEND_PING,
631 GOT_PING_ACK,
632 CONN_WD_ST_CHG_REQ,
633 CONN_WD_ST_CHG_OKAY,
634 CONN_WD_ST_CHG_FAIL,
635 CONN_DRY_RUN,
636 CREATE_BARRIER,
637 STATE_SENT,
638 CALLBACK_PENDING,
639
640
641 DISCONNECT_SENT,
642
643 DEVICE_WORK_PENDING,
644};
645
646enum which_state { NOW, OLD = NOW, NEW };
647
648struct drbd_resource {
649 char *name;
650#ifdef CONFIG_DEBUG_FS
651 struct dentry *debugfs_res;
652 struct dentry *debugfs_res_volumes;
653 struct dentry *debugfs_res_connections;
654 struct dentry *debugfs_res_in_flight_summary;
655#endif
656 struct kref kref;
657 struct idr devices;
658 struct list_head connections;
659 struct list_head resources;
660 struct res_opts res_opts;
661 struct mutex conf_update;
662 struct mutex adm_mutex;
663 spinlock_t req_lock;
664
665 unsigned susp:1;
666 unsigned susp_nod:1;
667 unsigned susp_fen:1;
668
669 enum write_ordering_e write_ordering;
670
671 cpumask_var_t cpu_mask;
672};
673
674struct drbd_thread_timing_details
675{
676 unsigned long start_jif;
677 void *cb_addr;
678 const char *caller_fn;
679 unsigned int line;
680 unsigned int cb_nr;
681};
682
683struct drbd_connection {
684 struct list_head connections;
685 struct drbd_resource *resource;
686#ifdef CONFIG_DEBUG_FS
687 struct dentry *debugfs_conn;
688 struct dentry *debugfs_conn_callback_history;
689 struct dentry *debugfs_conn_oldest_requests;
690#endif
691 struct kref kref;
692 struct idr peer_devices;
693 enum drbd_conns cstate;
694 struct mutex cstate_mutex;
695 unsigned int connect_cnt;
696
697 unsigned long flags;
698 struct net_conf *net_conf;
699 wait_queue_head_t ping_wait;
700
701 struct sockaddr_storage my_addr;
702 int my_addr_len;
703 struct sockaddr_storage peer_addr;
704 int peer_addr_len;
705
706 struct drbd_socket data;
707 struct drbd_socket meta;
708 int agreed_pro_version;
709 u32 agreed_features;
710 unsigned long last_received;
711 unsigned int ko_count;
712
713 struct list_head transfer_log;
714
715 struct crypto_shash *cram_hmac_tfm;
716 struct crypto_shash *integrity_tfm;
717 struct crypto_shash *peer_integrity_tfm;
718 struct crypto_shash *csums_tfm;
719 struct crypto_shash *verify_tfm;
720 void *int_dig_in;
721 void *int_dig_vv;
722
723
724 struct drbd_epoch *current_epoch;
725 spinlock_t epoch_lock;
726 unsigned int epochs;
727 atomic_t current_tle_nr;
728 unsigned current_tle_writes;
729
730 unsigned long last_reconnect_jif;
731
732 struct blk_plug receiver_plug;
733 struct drbd_thread receiver;
734 struct drbd_thread worker;
735 struct drbd_thread ack_receiver;
736 struct workqueue_struct *ack_sender;
737
738
739
740
741 struct drbd_request *req_next;
742 struct drbd_request *req_ack_pending;
743 struct drbd_request *req_not_net_done;
744
745
746 struct drbd_work_queue sender_work;
747
748#define DRBD_THREAD_DETAILS_HIST 16
749 unsigned int w_cb_nr;
750 unsigned int r_cb_nr;
751 struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
752 struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];
753
754 struct {
755 unsigned long last_sent_barrier_jif;
756
757
758
759 bool seen_any_write_yet;
760
761
762 int current_epoch_nr;
763
764
765
766
767 unsigned current_epoch_writes;
768 } send;
769};
770
771static inline bool has_net_conf(struct drbd_connection *connection)
772{
773 bool has_net_conf;
774
775 rcu_read_lock();
776 has_net_conf = rcu_dereference(connection->net_conf);
777 rcu_read_unlock();
778
779 return has_net_conf;
780}
781
782void __update_timing_details(
783 struct drbd_thread_timing_details *tdp,
784 unsigned int *cb_nr,
785 void *cb,
786 const char *fn, const unsigned int line);
787
788#define update_worker_timing_details(c, cb) \
789 __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
790#define update_receiver_timing_details(c, cb) \
791 __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
792
793struct submit_worker {
794 struct workqueue_struct *wq;
795 struct work_struct worker;
796
797
798 struct list_head writes;
799};
800
801struct drbd_peer_device {
802 struct list_head peer_devices;
803 struct drbd_device *device;
804 struct drbd_connection *connection;
805 struct work_struct send_acks_work;
806#ifdef CONFIG_DEBUG_FS
807 struct dentry *debugfs_peer_dev;
808#endif
809};
810
811struct drbd_device {
812 struct drbd_resource *resource;
813 struct list_head peer_devices;
814 struct list_head pending_bitmap_io;
815
816 unsigned long flush_jif;
817#ifdef CONFIG_DEBUG_FS
818 struct dentry *debugfs_minor;
819 struct dentry *debugfs_vol;
820 struct dentry *debugfs_vol_oldest_requests;
821 struct dentry *debugfs_vol_act_log_extents;
822 struct dentry *debugfs_vol_resync_extents;
823 struct dentry *debugfs_vol_data_gen_id;
824 struct dentry *debugfs_vol_ed_gen_id;
825#endif
826
827 unsigned int vnr;
828 unsigned int minor;
829
830 struct kref kref;
831
832
833 unsigned long flags;
834
835
836 struct drbd_backing_dev *ldev __protected_by(local);
837
838 sector_t p_size;
839 struct request_queue *rq_queue;
840 struct gendisk *vdisk;
841
842 unsigned long last_reattach_jif;
843 struct drbd_work resync_work;
844 struct drbd_work unplug_work;
845 struct timer_list resync_timer;
846 struct timer_list md_sync_timer;
847 struct timer_list start_resync_timer;
848 struct timer_list request_timer;
849
850
851 union drbd_state new_state_tmp;
852
853 union drbd_dev_state state;
854 wait_queue_head_t misc_wait;
855 wait_queue_head_t state_wait;
856 unsigned int send_cnt;
857 unsigned int recv_cnt;
858 unsigned int read_cnt;
859 unsigned int writ_cnt;
860 unsigned int al_writ_cnt;
861 unsigned int bm_writ_cnt;
862 atomic_t ap_bio_cnt;
863 atomic_t ap_actlog_cnt;
864 atomic_t ap_pending_cnt;
865 atomic_t rs_pending_cnt;
866 atomic_t unacked_cnt;
867 atomic_t local_cnt;
868 atomic_t suspend_cnt;
869
870
871 struct rb_root read_requests;
872 struct rb_root write_requests;
873
874
875
876 struct list_head pending_master_completion[2];
877 struct list_head pending_completion[2];
878
879
880 bool use_csums;
881
882 unsigned long rs_total;
883
884 unsigned long rs_failed;
885
886 unsigned long rs_start;
887
888 unsigned long rs_paused;
889
890 unsigned long rs_same_csum;
891#define DRBD_SYNC_MARKS 8
892#define DRBD_SYNC_MARK_STEP (3*HZ)
893
894 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
895
896 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
897
898 int rs_last_mark;
899 unsigned long rs_last_bcast;
900
901
902 sector_t ov_start_sector;
903 sector_t ov_stop_sector;
904
905 sector_t ov_position;
906
907 sector_t ov_last_oos_start;
908
909 sector_t ov_last_oos_size;
910 unsigned long ov_left;
911
912 struct drbd_bitmap *bitmap;
913 unsigned long bm_resync_fo;
914
915
916 struct lru_cache *resync;
917
918 unsigned int resync_locked;
919
920 unsigned int resync_wenr;
921
922 int open_cnt;
923 u64 *p_uuid;
924
925 struct list_head active_ee;
926 struct list_head sync_ee;
927 struct list_head done_ee;
928 struct list_head read_ee;
929 struct list_head net_ee;
930
931 int next_barrier_nr;
932 struct list_head resync_reads;
933 atomic_t pp_in_use;
934 atomic_t pp_in_use_by_net;
935 wait_queue_head_t ee_wait;
936 struct drbd_md_io md_io;
937 spinlock_t al_lock;
938 wait_queue_head_t al_wait;
939 struct lru_cache *act_log;
940 unsigned int al_tr_number;
941 int al_tr_cycle;
942 wait_queue_head_t seq_wait;
943 atomic_t packet_seq;
944 unsigned int peer_seq;
945 spinlock_t peer_seq_lock;
946 unsigned long comm_bm_set;
947 struct bm_io_work bm_io_work;
948 u64 ed_uuid;
949 struct mutex own_state_mutex;
950 struct mutex *state_mutex;
951 char congestion_reason;
952 atomic_t rs_sect_in;
953 atomic_t rs_sect_ev;
954 int rs_last_sect_ev;
955 int rs_last_events;
956
957 int c_sync_rate;
958 struct fifo_buffer *rs_plan_s;
959 int rs_in_flight;
960 atomic_t ap_in_flight;
961 unsigned int peer_max_bio_size;
962 unsigned int local_max_bio_size;
963
964
965
966 struct submit_worker submit;
967};
968
969struct drbd_bm_aio_ctx {
970 struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
972 unsigned long start_jif;
973 atomic_t in_flight;
974 unsigned int done;
975 unsigned flags;
976#define BM_AIO_COPY_PAGES 1
977#define BM_AIO_WRITE_HINTED 2
978#define BM_AIO_WRITE_ALL_PAGES 4
979#define BM_AIO_READ 8
980 int error;
981 struct kref kref;
982};
983
984struct drbd_config_context {
985
986 unsigned int minor;
987
988 unsigned int volume;
989#define VOLUME_UNSPECIFIED (-1U)
990
991
992 char *resource_name;
993 struct nlattr *my_addr;
994 struct nlattr *peer_addr;
995
996
997 struct sk_buff *reply_skb;
998
999 struct drbd_genlmsghdr *reply_dh;
1000
1001 struct drbd_device *device;
1002 struct drbd_resource *resource;
1003 struct drbd_connection *connection;
1004};
1005
1006static inline struct drbd_device *minor_to_device(unsigned int minor)
1007{
1008 return (struct drbd_device *)idr_find(&drbd_devices, minor);
1009}
1010
1011static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
1012{
1013 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
1014}
1015
1016static inline struct drbd_peer_device *
1017conn_peer_device(struct drbd_connection *connection, int volume_number)
1018{
1019 return idr_find(&connection->peer_devices, volume_number);
1020}
1021
1022#define for_each_resource(resource, _resources) \
1023 list_for_each_entry(resource, _resources, resources)
1024
1025#define for_each_resource_rcu(resource, _resources) \
1026 list_for_each_entry_rcu(resource, _resources, resources)
1027
1028#define for_each_resource_safe(resource, tmp, _resources) \
1029 list_for_each_entry_safe(resource, tmp, _resources, resources)
1030
1031#define for_each_connection(connection, resource) \
1032 list_for_each_entry(connection, &resource->connections, connections)
1033
1034#define for_each_connection_rcu(connection, resource) \
1035 list_for_each_entry_rcu(connection, &resource->connections, connections)
1036
1037#define for_each_connection_safe(connection, tmp, resource) \
1038 list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
1039
1040#define for_each_peer_device(peer_device, device) \
1041 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
1042
1043#define for_each_peer_device_rcu(peer_device, device) \
1044 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
1045
1046#define for_each_peer_device_safe(peer_device, tmp, device) \
1047 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
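
/*
 * Illustrative usage (assumes the caller holds the appropriate lock, or
 * rcu_read_lock() for the _rcu variants):
 *
 *	for_each_connection(connection, resource)
 *		request_ping(connection);
 */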
1048
1049static inline unsigned int device_to_minor(struct drbd_device *device)
1050{
1051 return device->minor;
1052}
1053
1054
1055
1056
1057
1058
1059
1060enum dds_flags {
1061 DDSF_FORCED = 1,
1062 DDSF_NO_RESYNC = 2,
1063};
1064
1065extern void drbd_init_set_defaults(struct drbd_device *device);
1066extern int drbd_thread_start(struct drbd_thread *thi);
1067extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1068#ifdef CONFIG_SMP
1069extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1070#else
1071#define drbd_thread_current_set_cpu(A) ({})
1072#endif
1073extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1074 unsigned int set_size);
1075extern void tl_clear(struct drbd_connection *);
1076extern void drbd_free_sock(struct drbd_connection *connection);
1077extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1078 void *buf, size_t size, unsigned msg_flags);
1079extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1080 unsigned);
1081
1082extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1083extern int drbd_send_protocol(struct drbd_connection *connection);
1084extern int drbd_send_uuids(struct drbd_peer_device *);
1085extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
1086extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
1087extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
1088extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
1089extern int drbd_send_current_state(struct drbd_peer_device *);
1090extern int drbd_send_sync_param(struct drbd_peer_device *);
1091extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1092 u32 set_size);
1093extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1094 struct drbd_peer_request *);
1095extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1096 struct p_block_req *rp);
1097extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
1098 struct p_data *dp, int data_size);
1099extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1100 sector_t sector, int blksize, u64 block_id);
1101extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
1102extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1103 struct drbd_peer_request *);
1104extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
1105extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1106 sector_t sector, int size, u64 block_id);
1107extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1108 int size, void *digest, int digest_size,
1109 enum drbd_packet cmd);
1110extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1111
1112extern int drbd_send_bitmap(struct drbd_device *device);
1113extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1114extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1115extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
1116extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1117extern void drbd_device_cleanup(struct drbd_device *device);
1118extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1119extern void drbd_queue_unplug(struct drbd_device *device);
1120
1121extern void conn_md_sync(struct drbd_connection *connection);
1122extern void drbd_md_write(struct drbd_device *device, void *buffer);
1123extern void drbd_md_sync(struct drbd_device *device);
1124extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1125extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1126extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1127extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1128extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1129extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1130extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1131extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1132extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
1133extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1134extern void drbd_md_mark_dirty(struct drbd_device *device);
1135extern void drbd_queue_bitmap_io(struct drbd_device *device,
1136 int (*io_fn)(struct drbd_device *),
1137 void (*done)(struct drbd_device *, int),
1138 char *why, enum bm_flag flags);
1139extern int drbd_bitmap_io(struct drbd_device *device,
1140 int (*io_fn)(struct drbd_device *),
1141 char *why, enum bm_flag flags);
1142extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1143 int (*io_fn)(struct drbd_device *),
1144 char *why, enum bm_flag flags);
1145extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1146extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
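
/*
 * Illustrative only: queueing a full bitmap write-out via the worker; the
 * argument combination shown is just one plausible example, not a statement
 * about any particular call site:
 *
 *	drbd_queue_bitmap_io(device, &drbd_bm_write, NULL,
 *			     "write bitmap after resync", BM_LOCKED_SET_ALLOWED);
 */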
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181#define MD_128MB_SECT (128LLU << 11)
1182#define MD_4kB_SECT 8
1183#define MD_32kB_SECT 64
1184
1185
/* one activity log extent covers 4 MiB (1 << 22 bytes) of backing storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203#define AL_UPDATES_PER_TRANSACTION 64
1204#define AL_CONTEXT_PER_TRANSACTION 919
1205
1206#if BITS_PER_LONG == 32
1207#define LN2_BPL 5
1208#define cpu_to_lel(A) cpu_to_le32(A)
1209#define lel_to_cpu(A) le32_to_cpu(A)
1210#elif BITS_PER_LONG == 64
1211#define LN2_BPL 6
1212#define cpu_to_lel(A) cpu_to_le64(A)
1213#define lel_to_cpu(A) le64_to_cpu(A)
1214#else
1215#error "LN2 of BITS_PER_LONG unknown!"
1216#endif
1217
1218
1219
1220struct bm_extent {
1221 int rs_left;
1222 int rs_failed;
1223 unsigned long flags;
1224 struct lc_element lce;
1225};
1226
1227#define BME_NO_WRITES 0
1228#define BME_LOCKED 1
1229#define BME_PRIORITY 2
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239#define SLEEP_TIME (HZ/10)
1240
1241
1242
/* one bitmap bit covers BM_BLOCK_SIZE (4 KiB) of backing storage */
#define BM_BLOCK_SHIFT 12
#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)

/* one "bitmap extent" (resync extent) covers 16 MiB of backing storage */
#define BM_EXT_SHIFT 24
#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1250
1251#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1252#error "HAVE YOU FIXED drbdmeta AS WELL??"
1253#endif
1254
1255
1256#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1257#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1258#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1259
1260
1261#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1262
1263
1264
1265#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1266#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1267
1268
1269#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1270
1271#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1272
1273#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1274
1275#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1276
1277
1278
1279#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
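
/*
 * Illustrative: sector 1024 maps to bitmap bit BM_SECT_TO_BIT(1024) == 128
 * (4 KiB per bit), and bit 128 starts again at sector
 * BM_BIT_TO_SECT(128) == 1024.
 */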
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1296
1297
1298
1299
1300
1301#define DRBD_MAX_SECTORS_FIXED_BM \
1302 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1303#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
1304
1305#if BITS_PER_LONG == 32
1306
1307
1308
1309#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1310#else
1311
1312#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1313
1314#endif
1315
1316
1317
1318
1319
1320
1321
#define DRBD_MAX_BIO_SIZE (1U << 20)		/* 1 MiB: largest bio we accept/advertise */
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_VECS << PAGE_SHIFT
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)	/* 4 KiB: conservative fallback */
1327
1328#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
1329#define DRBD_MAX_BIO_SIZE_P95 (1U << 17)
1330
1331
1332
1333
1334#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1335#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
1336
1337extern int drbd_bm_init(struct drbd_device *device);
1338extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1339extern void drbd_bm_cleanup(struct drbd_device *device);
1340extern void drbd_bm_set_all(struct drbd_device *device);
1341extern void drbd_bm_clear_all(struct drbd_device *device);
1342
1343extern int drbd_bm_set_bits(
1344 struct drbd_device *device, unsigned long s, unsigned long e);
1345extern int drbd_bm_clear_bits(
1346 struct drbd_device *device, unsigned long s, unsigned long e);
1347extern int drbd_bm_count_bits(
1348 struct drbd_device *device, const unsigned long s, const unsigned long e);
1349
1350
1351extern void _drbd_bm_set_bits(struct drbd_device *device,
1352 const unsigned long s, const unsigned long e);
1353extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1354extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1355extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1356extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1357extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1358extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1359extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1360extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1361extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1362extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1363extern size_t drbd_bm_words(struct drbd_device *device);
1364extern unsigned long drbd_bm_bits(struct drbd_device *device);
1365extern sector_t drbd_bm_capacity(struct drbd_device *device);
1366
1367#define DRBD_END_OF_BITMAP (~(unsigned long)0)
1368extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1369
1370extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1371extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1372extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1373extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1374
1375extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1376 size_t number, unsigned long *buffer);
1377
1378extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1379 size_t number, unsigned long *buffer);
1380
1381extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1382extern void drbd_bm_unlock(struct drbd_device *device);
1383
1384
1385extern struct kmem_cache *drbd_request_cache;
1386extern struct kmem_cache *drbd_ee_cache;
1387extern struct kmem_cache *drbd_bm_ext_cache;
1388extern struct kmem_cache *drbd_al_ext_cache;
1389extern mempool_t drbd_request_mempool;
1390extern mempool_t drbd_ee_mempool;
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405extern struct page *drbd_pp_pool;
1406extern spinlock_t drbd_pp_lock;
1407extern int drbd_pp_vacant;
1408extern wait_queue_head_t drbd_pp_wait;
1409
1410
1411
1412
1413
1414
1415#define DRBD_MIN_POOL_PAGES 128
1416extern mempool_t drbd_md_io_page_pool;
1417
1418
1419
1420extern struct bio_set drbd_md_io_bio_set;
1421
1422
1423extern struct bio_set drbd_io_bio_set;
1424
1425extern struct mutex resources_mutex;
1426
1427extern int conn_lowest_minor(struct drbd_connection *connection);
1428extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1429extern void drbd_destroy_device(struct kref *kref);
1430extern void drbd_delete_device(struct drbd_device *device);
1431
1432extern struct drbd_resource *drbd_create_resource(const char *name);
1433extern void drbd_free_resource(struct drbd_resource *resource);
1434
1435extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1436extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1437extern void drbd_destroy_connection(struct kref *kref);
1438extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1439 void *peer_addr, int peer_addr_len);
1440extern struct drbd_resource *drbd_find_resource(const char *name);
1441extern void drbd_destroy_resource(struct kref *kref);
1442extern void conn_free_crypto(struct drbd_connection *connection);
1443
1444
1445extern void do_submit(struct work_struct *ws);
1446extern void __drbd_make_request(struct drbd_device *, struct bio *);
1447void drbd_submit_bio(struct bio *bio);
1448extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1449extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1450
1451
1452
1453
1454extern struct mutex notification_mutex;
1455
1456extern void drbd_suspend_io(struct drbd_device *device);
1457extern void drbd_resume_io(struct drbd_device *device);
1458extern char *ppsize(char *buf, unsigned long long size);
1459extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1460enum determine_dev_size {
1461 DS_ERROR_SHRINK = -3,
1462 DS_ERROR_SPACE_MD = -2,
1463 DS_ERROR = -1,
1464 DS_UNCHANGED = 0,
1465 DS_SHRUNK = 1,
1466 DS_GREW = 2,
1467 DS_GREW_FROM_ZERO = 3,
1468};
1469extern enum determine_dev_size
1470drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1471extern void resync_after_online_grow(struct drbd_device *);
1472extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1473 struct drbd_backing_dev *bdev, struct o_qlim *o);
1474extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1475 enum drbd_role new_role,
1476 int force);
1477extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1478extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1479extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1480extern int drbd_khelper(struct drbd_device *device, char *cmd);
1481
1482
1483
1484extern void drbd_md_endio(struct bio *bio);
1485extern void drbd_peer_request_endio(struct bio *bio);
1486extern void drbd_request_endio(struct bio *bio);
1487extern int drbd_worker(struct drbd_thread *thi);
1488enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1489void drbd_resync_after_changed(struct drbd_device *device);
1490extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1491extern void resume_next_sg(struct drbd_device *device);
1492extern void suspend_other_sg(struct drbd_device *device);
1493extern int drbd_resync_finished(struct drbd_device *device);
1494
1495extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1496extern void drbd_md_put_buffer(struct drbd_device *device);
1497extern int drbd_md_sync_page_io(struct drbd_device *device,
1498 struct drbd_backing_dev *bdev, sector_t sector, int op);
1499extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1500extern void wait_until_done_or_force_detached(struct drbd_device *device,
1501 struct drbd_backing_dev *bdev, unsigned int *done);
1502extern void drbd_rs_controller_reset(struct drbd_device *device);
1503
1504static inline void ov_out_of_sync_print(struct drbd_device *device)
1505{
1506 if (device->ov_last_oos_size) {
1507 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1508 (unsigned long long)device->ov_last_oos_start,
1509 (unsigned long)device->ov_last_oos_size);
1510 }
1511 device->ov_last_oos_size = 0;
1512}
1513
1514
1515extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
1516extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
1517 void *);
1518
1519extern int w_e_end_data_req(struct drbd_work *, int);
1520extern int w_e_end_rsdata_req(struct drbd_work *, int);
1521extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1522extern int w_e_end_ov_reply(struct drbd_work *, int);
1523extern int w_e_end_ov_req(struct drbd_work *, int);
1524extern int w_ov_finished(struct drbd_work *, int);
1525extern int w_resync_timer(struct drbd_work *, int);
1526extern int w_send_write_hint(struct drbd_work *, int);
1527extern int w_send_dblock(struct drbd_work *, int);
1528extern int w_send_read_req(struct drbd_work *, int);
1529extern int w_e_reissue(struct drbd_work *, int);
1530extern int w_restart_disk_io(struct drbd_work *, int);
1531extern int w_send_out_of_sync(struct drbd_work *, int);
1532extern int w_start_resync(struct drbd_work *, int);
1533
1534extern void resync_timer_fn(struct timer_list *t);
1535extern void start_resync_timer_fn(struct timer_list *t);
1536
1537extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1538
1539
1540extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1541 sector_t start, unsigned int nr_sectors, int flags);
1542extern int drbd_receiver(struct drbd_thread *thi);
1543extern int drbd_ack_receiver(struct drbd_thread *thi);
1544extern void drbd_send_ping_wf(struct work_struct *ws);
1545extern void drbd_send_acks_wf(struct work_struct *ws);
1546extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1547extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1548 bool throttle_if_app_is_waiting);
1549extern int drbd_submit_peer_request(struct drbd_device *,
1550 struct drbd_peer_request *, const unsigned,
1551 const unsigned, const int);
1552extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1553extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1554 sector_t, unsigned int,
1555 unsigned int,
1556 gfp_t) __must_hold(local);
1557extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1558 int);
1559#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1560#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1561extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1562extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1563extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1564extern int drbd_connected(struct drbd_peer_device *);
1565
1566
1567void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1568
1569
1570
1571
1572static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1573 int fault_type, struct bio *bio)
1574{
1575 __release(local);
1576 if (!bio->bi_bdev) {
1577 drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
1578 bio->bi_status = BLK_STS_IOERR;
1579 bio_endio(bio);
1580 return;
1581 }
1582
1583 if (drbd_insert_fault(device, fault_type))
1584 bio_io_error(bio);
1585 else
1586 submit_bio_noacct(bio);
1587}
1588
1589void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1590 enum write_ordering_e wo);
1591
1592
1593extern struct proc_dir_entry *drbd_proc;
1594int drbd_seq_show(struct seq_file *seq, void *v);
1595
1596
1597extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1598extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1599extern void drbd_al_begin_io_commit(struct drbd_device *device);
1600extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1601extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1602extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1603extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1604extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1605extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1606extern void drbd_rs_cancel_all(struct drbd_device *device);
1607extern int drbd_rs_del_all(struct drbd_device *device);
1608extern void drbd_rs_failed_io(struct drbd_device *device,
1609 sector_t sector, int size);
1610extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1611
1612enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1613extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1614 enum update_sync_bits_mode mode);
1615#define drbd_set_in_sync(device, sector, size) \
1616 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1617#define drbd_set_out_of_sync(device, sector, size) \
1618 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1619#define drbd_rs_failed_io(device, sector, size) \
1620 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
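
/*
 * Illustrative: a failed local write is typically marked out of sync so the
 * block gets resynchronized later (field names from struct drbd_request):
 *
 *	drbd_set_out_of_sync(device, req->i.sector, req->i.size);
 */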
1621extern void drbd_al_shrink(struct drbd_device *device);
1622extern int drbd_al_initialize(struct drbd_device *, void *);
1623
1624
1625
1626struct sib_info {
1627 enum drbd_state_info_bcast_reason sib_reason;
1628 union {
1629 struct {
1630 char *helper_name;
1631 unsigned helper_exit_code;
1632 };
1633 struct {
1634 union drbd_state os;
1635 union drbd_state ns;
1636 };
1637 };
1638};
1639void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1640
1641extern int notify_resource_state(struct sk_buff *,
1642 unsigned int,
1643 struct drbd_resource *,
1644 struct resource_info *,
1645 enum drbd_notification_type);
1646extern int notify_device_state(struct sk_buff *,
1647 unsigned int,
1648 struct drbd_device *,
1649 struct device_info *,
1650 enum drbd_notification_type);
1651extern int notify_connection_state(struct sk_buff *,
1652 unsigned int,
1653 struct drbd_connection *,
1654 struct connection_info *,
1655 enum drbd_notification_type);
1656extern int notify_peer_device_state(struct sk_buff *,
1657 unsigned int,
1658 struct drbd_peer_device *,
1659 struct peer_device_info *,
1660 enum drbd_notification_type);
1661extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1662 struct drbd_connection *, const char *, int);
1663
1664
1665
1666
1667
1668
/* pages of a peer request are chained via the page's private field */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
1673#define page_chain_for_each(page) \
1674 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1675 page = page_chain_next(page))
1676#define page_chain_for_each_safe(page, n) \
1677 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1678
1679
1680static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1681{
1682 struct page *page = peer_req->pages;
1683 page_chain_for_each(page) {
1684 if (page_count(page) > 1)
1685 return 1;
1686 }
1687 return 0;
1688}
1689
1690static inline union drbd_state drbd_read_state(struct drbd_device *device)
1691{
1692 struct drbd_resource *resource = device->resource;
1693 union drbd_state rv;
1694
1695 rv.i = device->state.i;
1696 rv.susp = resource->susp;
1697 rv.susp_nod = resource->susp_nod;
1698 rv.susp_fen = resource->susp_fen;
1699
1700 return rv;
1701}
1702
1703enum drbd_force_detach_flags {
1704 DRBD_READ_ERROR,
1705 DRBD_WRITE_ERROR,
1706 DRBD_META_IO_ERROR,
1707 DRBD_FORCE_DETACH,
1708};
1709
1710#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1711static inline void __drbd_chk_io_error_(struct drbd_device *device,
1712 enum drbd_force_detach_flags df,
1713 const char *where)
1714{
1715 enum drbd_io_error_p ep;
1716
1717 rcu_read_lock();
1718 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1719 rcu_read_unlock();
1720 switch (ep) {
1721 case EP_PASS_ON:
1722 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1723 if (__ratelimit(&drbd_ratelimit_state))
1724 drbd_err(device, "Local IO failed in %s.\n", where);
1725 if (device->state.disk > D_INCONSISTENT)
1726 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1727 break;
1728 }
1729 fallthrough;
1730 case EP_DETACH:
1731 case EP_CALL_HELPER:
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752 set_bit(WAS_IO_ERROR, &device->flags);
1753 if (df == DRBD_READ_ERROR)
1754 set_bit(WAS_READ_ERROR, &device->flags);
1755 if (df == DRBD_FORCE_DETACH)
1756 set_bit(FORCE_DETACH, &device->flags);
1757 if (device->state.disk > D_FAILED) {
1758 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1759 drbd_err(device,
1760 "Local IO failed in %s. Detaching...\n", where);
1761 }
1762 break;
1763 }
1764}
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1775static inline void drbd_chk_io_error_(struct drbd_device *device,
1776 int error, enum drbd_force_detach_flags forcedetach, const char *where)
1777{
1778 if (error) {
1779 unsigned long flags;
1780 spin_lock_irqsave(&device->resource->req_lock, flags);
1781 __drbd_chk_io_error_(device, forcedetach, where);
1782 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1783 }
1784}
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1795{
1796 switch (bdev->md.meta_dev_idx) {
1797 case DRBD_MD_INDEX_INTERNAL:
1798 case DRBD_MD_INDEX_FLEX_INT:
1799 return bdev->md.md_offset + bdev->md.bm_offset;
1800 case DRBD_MD_INDEX_FLEX_EXT:
1801 default:
1802 return bdev->md.md_offset;
1803 }
1804}
1805
1806
1807
1808
1809
1810static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1811{
1812 switch (bdev->md.meta_dev_idx) {
1813 case DRBD_MD_INDEX_INTERNAL:
1814 case DRBD_MD_INDEX_FLEX_INT:
1815 return bdev->md.md_offset + MD_4kB_SECT -1;
1816 case DRBD_MD_INDEX_FLEX_EXT:
1817 default:
1818 return bdev->md.md_offset + bdev->md.md_size_sect -1;
1819 }
1820}
1821
1822
1823static inline sector_t drbd_get_capacity(struct block_device *bdev)
1824{
1825 return bdev ? bdev_nr_sectors(bdev) : 0;
1826}
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1837{
1838 sector_t s;
1839
1840 switch (bdev->md.meta_dev_idx) {
1841 case DRBD_MD_INDEX_INTERNAL:
1842 case DRBD_MD_INDEX_FLEX_INT:
1843 s = drbd_get_capacity(bdev->backing_bdev)
1844 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1845 drbd_md_first_sector(bdev))
1846 : 0;
1847 break;
1848 case DRBD_MD_INDEX_FLEX_EXT:
1849 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1850 drbd_get_capacity(bdev->backing_bdev));
1851
1852 s = min_t(sector_t, s,
1853 BM_EXT_TO_SECT(bdev->md.md_size_sect
1854 - bdev->md.bm_offset));
1855 break;
1856 default:
1857 s = min_t(sector_t, DRBD_MAX_SECTORS,
1858 drbd_get_capacity(bdev->backing_bdev));
1859 }
1860 return s;
1861}
1862
1863
1864
1865
1866
1867static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1868{
1869 const int meta_dev_idx = bdev->md.meta_dev_idx;
1870
1871 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1872 return 0;
1873
1874
1875
1876 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1877 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1878 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1879
1880
1881 return MD_128MB_SECT * bdev->md.meta_dev_idx;
1882}
1883
1884static inline void
1885drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1886{
1887 unsigned long flags;
1888 spin_lock_irqsave(&q->q_lock, flags);
1889 list_add_tail(&w->list, &q->q);
1890 spin_unlock_irqrestore(&q->q_lock, flags);
1891 wake_up(&q->q_wait);
1892}
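
/*
 * Illustrative: queue a prepared drbd_work item on the sender work queue,
 * e.g. the per-device unplug work (assumption: first_peer_device() is valid):
 *
 *	drbd_queue_work(&first_peer_device(device)->connection->sender_work,
 *			&device->unplug_work);
 */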
1893
1894static inline void
1895drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1896{
1897 unsigned long flags;
1898 spin_lock_irqsave(&q->q_lock, flags);
1899 if (list_empty_careful(&w->list))
1900 list_add_tail(&w->list, &q->q);
1901 spin_unlock_irqrestore(&q->q_lock, flags);
1902 wake_up(&q->q_wait);
1903}
1904
1905static inline void
1906drbd_device_post_work(struct drbd_device *device, int work_bit)
1907{
1908 if (!test_and_set_bit(work_bit, &device->flags)) {
1909 struct drbd_connection *connection =
1910 first_peer_device(device)->connection;
1911 struct drbd_work_queue *q = &connection->sender_work;
1912 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1913 wake_up(&q->q_wait);
1914 }
1915}
1916
1917extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1918
1919
1920
1921
1922
1923static inline void wake_ack_receiver(struct drbd_connection *connection)
1924{
1925 struct task_struct *task = connection->ack_receiver.task;
1926 if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1927 send_sig(SIGXCPU, task, 1);
1928}
1929
1930static inline void request_ping(struct drbd_connection *connection)
1931{
1932 set_bit(SEND_PING, &connection->flags);
1933 wake_ack_receiver(connection);
1934}
1935
1936extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1937extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1938extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1939 enum drbd_packet, unsigned int, void *,
1940 unsigned int);
1941extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1942 enum drbd_packet, unsigned int, void *,
1943 unsigned int);
1944
1945extern int drbd_send_ping(struct drbd_connection *connection);
1946extern int drbd_send_ping_ack(struct drbd_connection *connection);
1947extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1948extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1949
1950static inline void drbd_thread_stop(struct drbd_thread *thi)
1951{
1952 _drbd_thread_stop(thi, false, true);
1953}
1954
1955static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1956{
1957 _drbd_thread_stop(thi, false, false);
1958}
1959
1960static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1961{
1962 _drbd_thread_stop(thi, true, false);
1963}
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
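/*
 * Brief summary of the in-flight counters below (condensed, based on how
 * they are used throughout this header):
 *	ap_pending_cnt	application requests sent to the peer, ack still pending
 *	rs_pending_cnt	resync related requests/acks we still expect
 *	unacked_cnt	peer requests received that we still have to ack
 */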
1987static inline void inc_ap_pending(struct drbd_device *device)
1988{
1989 atomic_inc(&device->ap_pending_cnt);
1990}
1991
1992#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
1993 if (atomic_read(&device->which) < 0) \
1994 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
1995 func, line, \
1996 atomic_read(&device->which))
1997
1998#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
1999static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2000{
2001 if (atomic_dec_and_test(&device->ap_pending_cnt))
2002 wake_up(&device->misc_wait);
2003 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2004}
2005
2006
2007
2008
2009
2010
2011
2012static inline void inc_rs_pending(struct drbd_device *device)
2013{
2014 atomic_inc(&device->rs_pending_cnt);
2015}
2016
2017#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2018static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2019{
2020 atomic_dec(&device->rs_pending_cnt);
2021 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2022}
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033static inline void inc_unacked(struct drbd_device *device)
2034{
2035 atomic_inc(&device->unacked_cnt);
2036}
2037
2038#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2039static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2040{
2041 atomic_dec(&device->unacked_cnt);
2042 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2043}
2044
2045#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2046static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2047{
2048 atomic_sub(n, &device->unacked_cnt);
2049 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2050}
2051
2052static inline bool is_sync_target_state(enum drbd_conns connection_state)
2053{
2054 return connection_state == C_SYNC_TARGET ||
2055 connection_state == C_PAUSED_SYNC_T;
2056}
2057
2058static inline bool is_sync_source_state(enum drbd_conns connection_state)
2059{
2060 return connection_state == C_SYNC_SOURCE ||
2061 connection_state == C_PAUSED_SYNC_S;
2062}
2063
2064static inline bool is_sync_state(enum drbd_conns connection_state)
2065{
2066 return is_sync_source_state(connection_state) ||
2067 is_sync_target_state(connection_state);
2068}
2069
2070
2071
2072
2073
2074
2075
2076
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
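
/*
 * Illustrative usage: get_ldev()/put_ldev() bracket any access to
 * device->ldev and pin local_cnt while the local disk is in use:
 *
 *	if (get_ldev(device)) {
 *		... use device->ldev ...
 *		put_ldev(device);
 *	}
 */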
2081
2082static inline void put_ldev(struct drbd_device *device)
2083{
2084 enum drbd_disk_state disk_state = device->state.disk;
2085
2086
2087
2088
2089 int i = atomic_dec_return(&device->local_cnt);
2090
2091
2092
2093
2094 __release(local);
2095 D_ASSERT(device, i >= 0);
2096 if (i == 0) {
2097 if (disk_state == D_DISKLESS)
2098
2099 drbd_device_post_work(device, DESTROY_DISK);
2100 if (disk_state == D_FAILED)
2101
2102 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2103 drbd_device_post_work(device, GO_DISKLESS);
2104 wake_up(&device->misc_wait);
2105 }
2106}
2107
2108#ifndef __CHECKER__
2109static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2110{
2111 int io_allowed;
2112
2113
2114 if (device->state.disk == D_DISKLESS)
2115 return 0;
2116
2117 atomic_inc(&device->local_cnt);
2118 io_allowed = (device->state.disk >= mins);
2119 if (!io_allowed)
2120 put_ldev(device);
2121 return io_allowed;
2122}
2123#else
2124extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2125#endif
2126
2127
2128
2129
2130static inline int drbd_get_max_buffers(struct drbd_device *device)
2131{
2132 struct net_conf *nc;
2133 int mxb;
2134
2135 rcu_read_lock();
2136 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2137 mxb = nc ? nc->max_buffers : 1000000;
2138 rcu_read_unlock();
2139
2140 return mxb;
2141}
2142
2143static inline int drbd_state_is_stable(struct drbd_device *device)
2144{
2145 union drbd_dev_state s = device->state;
2146
2147
2148
2149
2150 switch ((enum drbd_conns)s.conn) {
2151
2152 case C_STANDALONE:
2153 case C_WF_CONNECTION:
2154
2155 case C_CONNECTED:
2156 case C_SYNC_SOURCE:
2157 case C_SYNC_TARGET:
2158 case C_VERIFY_S:
2159 case C_VERIFY_T:
2160 case C_PAUSED_SYNC_S:
2161 case C_PAUSED_SYNC_T:
2162 case C_AHEAD:
2163 case C_BEHIND:
2164
2165 case C_DISCONNECTING:
2166 case C_UNCONNECTED:
2167 case C_TIMEOUT:
2168 case C_BROKEN_PIPE:
2169 case C_NETWORK_FAILURE:
2170 case C_PROTOCOL_ERROR:
2171 case C_TEAR_DOWN:
2172 case C_WF_REPORT_PARAMS:
2173 case C_STARTING_SYNC_S:
2174 case C_STARTING_SYNC_T:
2175 break;
2176
2177
2178 case C_WF_BITMAP_S:
2179 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2180 return 0;
2181 break;
2182
2183
2184 case C_WF_BITMAP_T:
2185 case C_WF_SYNC_UUID:
2186 case C_MASK:
2187
2188 return 0;
2189 }
2190
2191 switch ((enum drbd_disk_state)s.disk) {
2192 case D_DISKLESS:
2193 case D_INCONSISTENT:
2194 case D_OUTDATED:
2195 case D_CONSISTENT:
2196 case D_UP_TO_DATE:
2197 case D_FAILED:
2198
2199 break;
2200
2201
2202 case D_ATTACHING:
2203 case D_NEGOTIATING:
2204 case D_UNKNOWN:
2205 case D_MASK:
2206
2207 return 0;
2208 }
2209
2210 return 1;
2211}
2212
2213static inline int drbd_suspended(struct drbd_device *device)
2214{
2215 struct drbd_resource *resource = device->resource;
2216
2217 return resource->susp || resource->susp_fen || resource->susp_nod;
2218}
2219
2220static inline bool may_inc_ap_bio(struct drbd_device *device)
2221{
2222 int mxb = drbd_get_max_buffers(device);
2223
2224 if (drbd_suspended(device))
2225 return false;
2226 if (atomic_read(&device->suspend_cnt))
2227 return false;
2228
2229
2230
2231
2232
2233
2234 if (!drbd_state_is_stable(device))
2235 return false;
2236
2237
2238
2239 if (atomic_read(&device->ap_bio_cnt) > mxb)
2240 return false;
2241 if (test_bit(BITMAP_IO, &device->flags))
2242 return false;
2243 return true;
2244}
2245
2246static inline bool inc_ap_bio_cond(struct drbd_device *device)
2247{
2248 bool rv = false;
2249
2250 spin_lock_irq(&device->resource->req_lock);
2251 rv = may_inc_ap_bio(device);
2252 if (rv)
2253 atomic_inc(&device->ap_bio_cnt);
2254 spin_unlock_irq(&device->resource->req_lock);
2255
2256 return rv;
2257}
2258
2259static inline void inc_ap_bio(struct drbd_device *device)
2260{
2261
2262
2263
2264
2265
2266
2267
2268
2269 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2270}
2271
2272static inline void dec_ap_bio(struct drbd_device *device)
2273{
2274 int mxb = drbd_get_max_buffers(device);
2275 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2276
2277 D_ASSERT(device, ap_bio >= 0);
2278
2279 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2280 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2281 drbd_queue_work(&first_peer_device(device)->
2282 connection->sender_work,
2283 &device->bm_io_work.w);
2284 }
2285
2286
2287
2288
2289 if (ap_bio < mxb)
2290 wake_up(&device->misc_wait);
2291}
2292
2293static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2294{
2295 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2296 first_peer_device(device)->connection->agreed_pro_version != 100;
2297}
2298
2299static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2300{
2301 int changed = device->ed_uuid != val;
2302 device->ed_uuid = val;
2303 return changed;
2304}
2305
2306static inline int drbd_queue_order_type(struct drbd_device *device)
2307{
2308
2309
2310#ifndef QUEUE_ORDERED_NONE
2311#define QUEUE_ORDERED_NONE 0
2312#endif
2313 return QUEUE_ORDERED_NONE;
2314}
2315
2316static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2317{
2318 return list_first_entry_or_null(&resource->connections,
2319 struct drbd_connection, connections);
2320}
2321
2322#endif
2323