1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#ifndef _DRBD_INT_H
27#define _DRBD_INT_H
28
29#include <crypto/hash.h>
30#include <linux/compiler.h>
31#include <linux/types.h>
32#include <linux/list.h>
33#include <linux/sched/signal.h>
34#include <linux/bitops.h>
35#include <linux/slab.h>
36#include <linux/ratelimit.h>
37#include <linux/tcp.h>
38#include <linux/mutex.h>
39#include <linux/major.h>
40#include <linux/blkdev.h>
41#include <linux/backing-dev.h>
42#include <linux/genhd.h>
43#include <linux/idr.h>
44#include <linux/dynamic_debug.h>
45#include <net/tcp.h>
46#include <linux/lru_cache.h>
47#include <linux/prefetch.h>
48#include <linux/drbd_genl_api.h>
49#include <linux/drbd.h>
50#include "drbd_strings.h"
51#include "drbd_state.h"
52#include "drbd_protocol.h"
53
54#ifdef __CHECKER__
55# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
56# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
57# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
58#else
59# define __protected_by(x)
60# define __protected_read_by(x)
61# define __protected_write_by(x)
62#endif
63
64
65#ifdef CONFIG_DRBD_FAULT_INJECTION
66extern int drbd_enable_faults;
67extern int drbd_fault_rate;
68#endif
69
70extern unsigned int drbd_minor_count;
71extern char drbd_usermode_helper[];
72extern int drbd_proc_details;
73
74
75
76
77
78
79
80#define DRBD_SIGKILL SIGHUP
81
82#define ID_IN_SYNC (4711ULL)
83#define ID_OUT_OF_SYNC (4712ULL)
84#define ID_SYNCER (-1ULL)
85
86#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
87
88struct drbd_device;
89struct drbd_connection;
90
91#define __drbd_printk_device(level, device, fmt, args...) \
92 dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
93#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
94 dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
95#define __drbd_printk_resource(level, resource, fmt, args...) \
96 printk(level "drbd %s: " fmt, (resource)->name, ## args)
97#define __drbd_printk_connection(level, connection, fmt, args...) \
98 printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
99
100void drbd_printk_with_wrong_object_type(void);
101
102#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
103 (__builtin_types_compatible_p(typeof(obj), type) || \
104 __builtin_types_compatible_p(typeof(obj), const type)), \
105 func(level, (const type)(obj), fmt, ## args)
106
107#define drbd_printk(level, obj, fmt, args...) \
108 __builtin_choose_expr( \
109 __drbd_printk_if_same_type(obj, struct drbd_device *, \
110 __drbd_printk_device, level, fmt, ## args), \
111 __builtin_choose_expr( \
112 __drbd_printk_if_same_type(obj, struct drbd_resource *, \
113 __drbd_printk_resource, level, fmt, ## args), \
114 __builtin_choose_expr( \
115 __drbd_printk_if_same_type(obj, struct drbd_connection *, \
116 __drbd_printk_connection, level, fmt, ## args), \
117 __builtin_choose_expr( \
118 __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
119 __drbd_printk_peer_device, level, fmt, ## args), \
120 drbd_printk_with_wrong_object_type()))))
121
122#define drbd_dbg(obj, fmt, args...) \
123 drbd_printk(KERN_DEBUG, obj, fmt, ## args)
124#define drbd_alert(obj, fmt, args...) \
125 drbd_printk(KERN_ALERT, obj, fmt, ## args)
126#define drbd_err(obj, fmt, args...) \
127 drbd_printk(KERN_ERR, obj, fmt, ## args)
128#define drbd_warn(obj, fmt, args...) \
129 drbd_printk(KERN_WARNING, obj, fmt, ## args)
130#define drbd_info(obj, fmt, args...) \
131 drbd_printk(KERN_INFO, obj, fmt, ## args)
132#define drbd_emerg(obj, fmt, args...) \
133 drbd_printk(KERN_EMERG, obj, fmt, ## args)
134
135#define dynamic_drbd_dbg(device, fmt, args...) \
136 dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
137
138#define D_ASSERT(device, exp) do { \
139 if (!(exp)) \
140 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
141 } while (0)
142
143
144
145
146
147
148#define expect(exp) ({ \
149 bool _bool = (exp); \
150 if (!_bool) \
151 drbd_err(device, "ASSERTION %s FAILED in %s\n", \
152 #exp, __func__); \
153 _bool; \
154 })
155
156
157enum {
158 DRBD_FAULT_MD_WR = 0,
159 DRBD_FAULT_MD_RD = 1,
160 DRBD_FAULT_RS_WR = 2,
161 DRBD_FAULT_RS_RD = 3,
162 DRBD_FAULT_DT_WR = 4,
163 DRBD_FAULT_DT_RD = 5,
164 DRBD_FAULT_DT_RA = 6,
165 DRBD_FAULT_BM_ALLOC = 7,
166 DRBD_FAULT_AL_EE = 8,
167 DRBD_FAULT_RECEIVE = 9,
168
169 DRBD_FAULT_MAX,
170};
171
172extern unsigned int
173_drbd_insert_fault(struct drbd_device *device, unsigned int type);
174
/*
 * drbd_insert_fault() - decide whether to inject a fault of @type for @device
 *
 * Returns nonzero when fault injection is compiled in, globally enabled
 * (drbd_fault_rate), the fault class is selected in drbd_enable_faults,
 * and the dice roll in _drbd_insert_fault() says "yes".
 * Without CONFIG_DRBD_FAULT_INJECTION this is a constant 0 and the
 * compiler can discard the fault-injection branches entirely.
 */
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	/* cheap global gates first; only then roll the dice */
	if (!drbd_fault_rate)
		return 0;
	if (!(drbd_enable_faults & (1 << type)))
		return 0;
	return _drbd_insert_fault(device, type) ? 1 : 0;
#else
	return 0;
#endif
}
185
186
187#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
188
189#define div_floor(A, B) ((A)/(B))
190
191extern struct ratelimit_state drbd_ratelimit_state;
192extern struct idr drbd_devices;
193extern struct list_head drbd_resources;
194
195extern const char *cmdname(enum drbd_packet cmd);
196
197
198
/*
 * Context while the bitmap is transferred over the wire in several
 * packets: tracks progress through the bitmap and per-encoding
 * statistics.  bm_bits/bm_words are fixed for the duration of a
 * transfer; bit_offset/word_offset advance as chunks are processed.
 */
struct bm_xfer_ctx {
	unsigned long bm_bits;		/* total number of bits in the bitmap */
	unsigned long bm_words;		/* total number of (long) words in the bitmap */

	unsigned long bit_offset;	/* current transfer position, in bits */
	unsigned long word_offset;	/* current transfer position, in words */

	/* statistics; index 0/1 distinguishes the two packet encodings
	 * (plain vs. compressed) -- TODO confirm index meaning against
	 * the send/receive code */
	unsigned packets[2];
	unsigned bytes[2];
};
214
215extern void INFO_bm_xfer_stats(struct drbd_device *device,
216 const char *direction, struct bm_xfer_ctx *c);
217
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packets may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);	/* round down to an even (64-bit aligned) word */
#else
# error "unsupported BITS_PER_LONG"
#endif
}
236
237extern unsigned int drbd_header_size(struct drbd_connection *connection);
238
239
240enum drbd_thread_state {
241 NONE,
242 RUNNING,
243 EXITING,
244 RESTARTING
245};
246
/* One of DRBD's kernel threads (receiver, worker, ack_receiver). */
struct drbd_thread {
	spinlock_t t_lock;		/* protects t_state transitions */
	struct task_struct *task;	/* the kernel thread; NULL when not running */
	struct completion stop;		/* completed when the thread has exited */
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);	/* thread main loop */
	struct drbd_resource *resource;
	struct drbd_connection *connection;	/* may be NULL for per-resource threads -- TODO confirm */
	int reset_cpu_mask;		/* nonzero: re-apply the resource cpu_mask */
	const char *name;		/* short thread name used in log messages */
};
258
/* Read the thread state without taking t_lock. */
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 * The barrier pairs with the writer's store so we do not read a
	 * stale cached value. */
	smp_rmb();
	return thi->t_state;
}
268
269struct drbd_work {
270 struct list_head list;
271 int (*cb)(struct drbd_work *, int cancel);
272};
273
274struct drbd_device_work {
275 struct drbd_work w;
276 struct drbd_device *device;
277};
278
279#include "drbd_interval.h"
280
281extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
282
283extern void lock_all_resources(void);
284extern void unlock_all_resources(void);
285
/*
 * DRBD's representation of one application ("master") bio and its journey
 * through the local disk and the network.
 */
struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the error pointer
	 * -- NOTE(review): inferred from usage elsewhere in drbd; confirm
	 * against the request endio handler */
	struct bio *private_bio;

	/* position (sector, size) on the device; also used for write
	 * conflict detection via the interval trees */
	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it, causing
	 * a P_BARRIER packet to be sent, starting a new epoch
	 * -- TODO confirm against the transfer-log code */
	unsigned int epoch;

	struct list_head tl_requests;	/* node in connection->transfer_log */
	struct bio *master_bio;		/* the original application bio */

	/* see the identically named lists in struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics:
	 * minimal set of time stamps to determine whether we wait for
	 * activity log transactions, local disk, or the peer. */

	/* when the request entered the activity log */
	unsigned long in_actlog_jif;

	/* local disk submission */
	unsigned long pre_submit_jif;

	/* per connection network phases */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;

	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state;	/* RQ_* state flags */
};
375
/* A write epoch on the receiving side, delimited by P_BARRIER packets. */
struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;		/* node in the connection's epoch list */
	unsigned int barrier_nr;
	atomic_t epoch_size;		/* increased on every request added */
	atomic_t active;		/* increased on every request added, decreased on every finished */
	unsigned long flags;		/* DE_* bits */
};
384
385
386int drbdd_init(struct drbd_thread *);
387int drbd_asender(struct drbd_thread *);
388
389
390enum {
391 DE_HAVE_BARRIER_NUMBER,
392};
393
394enum epoch_event {
395 EV_PUT,
396 EV_GOT_BARRIER_NR,
397 EV_BECAME_LAST,
398 EV_CLEANUP = 32,
399};
400
401struct digest_info {
402 int digest_size;
403 void *digest;
404};
405
/* A request originating from the peer: incoming write data, resync data,
 * or a read/verify request served on behalf of the peer. */
struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch;	/* for writes */
	struct page *pages;		/* chained pages holding the payload */
	atomic_t pending_bios;		/* a peer request may span several bios */
	struct drbd_interval i;		/* sector, size; conflict detection */

	unsigned long flags;		/* EE_* bits, see enum below */
	unsigned long submit_jif;
	union {
		u64 block_id;		/* opaque id echoed back to the peer */
		struct digest_info *digest;	/* when EE_HAS_DIGEST is set */
	};
};
421
422
423
424
425
426
427
/* drbd_peer_request->flags bit numbers (EE_* masks are defined below) */
enum {
	__EE_CALL_AL_COMPLETE_IO,	/* complete the activity log extent on IO completion */
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM/discard request? */
	__EE_IS_TRIM,

	/* in case a barrier failed, we need to resubmit without the
	 * barrier flag */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request; if any of those
	 * fail, this flag is set atomically from the endio callback */
	__EE_WAS_ERROR,

	/* this ee carries a digest pointer instead of a block id */
	__EE_HAS_DIGEST,

	/* conflicting local requests need to be restarted after this one */
	__EE_RESTART_REQUESTS,

	/* the peer wants a write ACK for this (wire protocol C) */
	__EE_SEND_WRITE_ACK,

	/* tracked in the write interval tree (two-primaries setups)
	 * -- TODO confirm exact condition against the receiver code */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: has this been submitted, or does it still wait
	 * for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write-same request */
	__EE_WRITE_SAME,

	/* this originates from the application on the peer
	 * (not some resync, verify, or other DRBD-internal request) */
	__EE_APPLICATION,

	/* if it contains only zero bytes, reply with P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
473#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
474#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
475#define EE_IS_TRIM (1<<__EE_IS_TRIM)
476#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
477#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
478#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
479#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
480#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
481#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
482#define EE_SUBMITTED (1<<__EE_SUBMITTED)
483#define EE_WRITE (1<<__EE_WRITE)
484#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
485#define EE_APPLICATION (1<<__EE_APPLICATION)
486#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
487
488
/* per-device flag bits (device->flags) */
enum {
	UNPLUG_REMOTE,		/* sending an "unplug remote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout */
	CL_ST_CHG_SUCCESS,	/* cluster-wide state change succeeded */
	CL_ST_CHG_FAIL,		/* cluster-wide state change failed */
	CRASHED_PRIMARY,	/* this node was a crashed primary */

	/* resync should be (re)considered on next opportunity */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* user asked us not to use FUA/FLUSH on meta data */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* started bitmap io */
	WAS_IO_ERROR,		/* local disk failed, returned io error */
	WAS_READ_ERROR,		/* local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* force-detach, to handle "failed" meta-data io */
	RESYNC_AFTER_NEG,	/* resync after online grow after attach+negotiate finished */
	RESIZE_PENDING,		/* size change detected locally, waiting for peer response */

	NEW_CUR_UUID,		/* create new current uuid when thawing io */
	AL_SUSPENDED,		/* activity logging is currently suspended */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource transition queued */
	B_RS_H_DONE,		/* before-resync handler already executed */
	DISCARD_MY_DATA,	/* discard-my-data flag, per volume */
	READ_BALANCE_RR,	/* round-robin read balancing state */

	FLUSH_PENDING,		/* if set, device->flush_jif is when the flush was submitted */

	/* cleared only after backing-device related structures have been
	 * destroyed */
	GOING_DISKLESS,		/* disk is being detached (io-error or admin request) */

	/* bits below request work from the worker (device_post_work style) */
	GO_DISKLESS,		/* schedule cleanup before detach */
	DESTROY_DISK,		/* carefully destroy the backing device */
	MD_SYNC,		/* write out meta data */
	RS_START,		/* start resync / online verify */
	RS_PROGRESS,		/* resync made significant progress */
	RS_DONE,		/* resync is finished */
};
532
533struct drbd_bitmap;
534
535
536
/* Reasons why bitmap access may be locked, and which operations are
 * still allowed while it is. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits, allowed; clearing
	 * bits would be unexpected.  Used e.g. while receiving the
	 * bitmap -- TODO confirm usage sites */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* everything is allowed, only concurrent bulk operations are
	 * locked out */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
562
/* Simple work queue: list of struct drbd_work, protected by q_lock;
 * consumers sleep on q_wait until work arrives. */
struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;	/* protects q */
	wait_queue_head_t q_wait;
};
568
/* One of the two sockets of a connection (data or meta), plus its
 * pre-allocated send/receive buffers. */
struct drbd_socket {
	struct mutex mutex;	/* serializes senders on this socket */
	struct socket *socket;

	/* this way we get our send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};
577
/* In-core representation of DRBD's on-disk meta data. */
struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;	/* protects uuid[] */
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;		/* MDF_* flags */
	u32 md_size_sect;

	s32 al_offset;		/* signed relative sector offset to activity log */
	s32 bm_offset;		/* signed relative sector offset to bitmap */

	/* cached value of disk_conf->meta_dev_idx */
	s32 meta_dev_idx;

	/* activity log striping geometry;
	 * al_size_4k is the cached product of the two above it */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k;
};
599
/* The local backing storage of a DRBD device. */
struct drbd_backing_dev {
	struct block_device *backing_bdev;	/* the data device */
	struct block_device *md_bdev;		/* the meta-data device (may equal backing_bdev) */
	struct drbd_md md;			/* parsed meta data */
	struct disk_conf *disk_conf;		/* RCU protected; updates under resource->conf_update */
	sector_t known_size;			/* last known size of the backing device */
};
607
/* State for the single in-flight meta-data IO buffer of a device. */
struct drbd_md_io {
	struct page *page;		/* the one meta-data IO page */
	unsigned long start_jif;	/* when the buffer was last acquired */
	unsigned long submit_jif;	/* when the IO was last submitted */
	const char *current_use;	/* who holds the buffer (for diagnostics) */
	atomic_t in_use;		/* buffer ownership refcount/flag -- TODO confirm semantics */
	unsigned int done;		/* completion signalled */
	int error;			/* result of the last IO */
};
617
/* A bitmap IO operation queued to run from the worker while application
 * IO is suspended; done() is called with the io_fn() result. */
struct bm_io_work {
	struct drbd_work w;
	char *why;		/* human-readable reason, for logging */
	enum bm_flag flags;	/* how the bitmap is locked meanwhile */
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};
625
/*
 * Ring buffer of recent values for the dynamic resync-speed controller.
 * @head_index: index of the logical first element in values[];
 * @size:       number of elements in values[];
 * @total:      sum of all values[] entries.
 * Allocated with trailing storage by fifo_alloc().
 */
struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total;
	/* C99 flexible array member instead of the deprecated GNU
	 * zero-length array [0]: well-defined sizeof semantics and
	 * compilers/sanitizers can bounds-check accesses. */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
633
634
/* per-connection flag bits (connection->flags) */
enum {
	NET_CONGESTED,		/* the data socket is congested */
	RESOLVE_CONFLICTS,	/* set on one node, cleared on the peer! */
	SEND_PING,		/* ask the ack sender to send a ping */
	GOT_PING_ACK,		/* set on ping-ack reception; ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* a cluster-wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* expect disconnect after resync handshake */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* a usermode helper call is pending from the
				 * worker context; some actions must not wait
				 * for it or deadlock may result
				 * -- TODO confirm against helper call sites */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};
656
657enum which_state { NOW, OLD = NOW, NEW };
658
/* A DRBD resource: a named group of volumes (devices) and connections. */
struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;	/* node in the global drbd_resources list */
	struct res_opts res_opts;
	struct mutex conf_update;	/* for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* serializes administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;		/* CPUs the DRBD threads are pinned to */
};
684
/* One entry of the worker/receiver callback timing history
 * (see update_worker_timing_details() below). */
struct drbd_thread_timing_details
{
	unsigned long start_jif;	/* when the callback started */
	void *cb_addr;			/* address of the callback function */
	const char *caller_fn;		/* __func__ of the recording site */
	unsigned int line;		/* __LINE__ of the recording site */
	unsigned int cb_nr;		/* monotonically increasing callback counter */
};
693
/* A connection to one peer; carries the sockets, threads, and the
 * transfer log shared by all volumes replicated over it. */
struct drbd_connection {
	struct list_head connections;	/* node in resource->connections */
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;
	struct mutex cstate_mutex;	/* protects graceful disconnects */
	unsigned int connect_cnt;	/* incremented each time a connection is established */

	unsigned long flags;		/* per-connection flag bits, see enum above */
	struct net_conf *net_conf;	/* RCU protected; NULL when not configured */
	wait_queue_head_t ping_wait;	/* woken on ping-ack reception and state changes */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (meta data) packets */
	int agreed_pro_version;		/* actually negotiated protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;	/* checksums we compute */
	struct crypto_ahash *peer_integrity_tfm;	/* checksums we verify */
	struct crypto_ahash *csums_tfm;
	struct crypto_ahash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;		/* protects the epoch list */
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;

	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers so we can look up the oldest pending requests
	 * more quickly; protected by resource->req_lock
	 * -- TODO confirm locking against the request code */
	struct drbd_request *req_next;
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST 16
	unsigned int w_cb_nr;		/* keeps counting up */
	unsigned int r_cb_nr;		/* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread has processed a single
		 * write yet */
		bool seen_any_write_yet;

		/* which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent with
		 * req->epoch == current_epoch_nr;
		 * if none, no P_BARRIER will be sent */
		unsigned current_epoch_writes;
	} send;
};
781
782static inline bool has_net_conf(struct drbd_connection *connection)
783{
784 bool has_net_conf;
785
786 rcu_read_lock();
787 has_net_conf = rcu_dereference(connection->net_conf);
788 rcu_read_unlock();
789
790 return has_net_conf;
791}
792
793void __update_timing_details(
794 struct drbd_thread_timing_details *tdp,
795 unsigned int *cb_nr,
796 void *cb,
797 const char *fn, const unsigned int line);
798
799#define update_worker_timing_details(c, cb) \
800 __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
801#define update_receiver_timing_details(c, cb) \
802 __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
803
/* Single-threaded workqueue context for deferred write submission. */
struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* list of deferred write requests;
	 * protected by ..->resource->req_lock -- TODO confirm */
	struct list_head writes;
};
811
/* The (device, connection) pair: one volume as replicated to one peer. */
struct drbd_peer_device {
	struct list_head peer_devices;	/* node in device->peer_devices */
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};
821
/* One replicated block device (a "volume" / minor). */
struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;		/* list of struct drbd_peer_device */
	struct list_head pending_bitmap_io;	/* in-flight bitmap IO contexts */

	unsigned long flush_jif;	/* when the pending flush was submitted (see FLUSH_PENDING) */
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the resource */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* per-device flag bits, see enum above */
	unsigned long flags;

	/* configured by drbdsetup attach; NULL while diskless */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;		/* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* used while negotiating new disk state after attach */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;	/* woken upon each state change */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;		/* application requests we need to complete */
	atomic_t ap_actlog_cnt;		/* requests waiting for activity log */
	atomic_t ap_pending_cnt;	/* application data packets on the wire, ack expected */
	atomic_t rs_pending_cnt;	/* resync request/data packets on the wire */
	atomic_t unacked_cnt;		/* need to send replies for */
	atomic_t local_cnt;		/* waiting for local completion */
	atomic_t suspend_cnt;

	/* interval trees of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics/debugfs; index presumably READ=0/WRITE=1
	 * -- TODO confirm indexing against the request code */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync run */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* resync start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in paused-sync state [unit jiffies] */
	unsigned long rs_paused;
	/* blocks skipped because checksum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* blocks not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* mark timestamps [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast;	/* [unit jiffies] */

	/* online verify: where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;	/* where to stop (sector) */
	/* where are we now (sector) */
	sector_t ov_position;
	/* start sector of the current out-of-sync range (to merge reporting) */
	sector_t ov_last_oos_start;
	/* size of that out-of-sync range, in sectors */
	sector_t ov_last_oos_size;
	unsigned long ov_left;		/* bits left to verify */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo;	/* bit offset for drbd_bm_find_next */

	/* LRU of resync extents currently being worked on */
	struct lru_cache *resync;
	/* number of locked elements in the resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;			/* peer's UUID set, while connected */

	struct list_head active_ee;	/* peer writes being written to disk */
	struct list_head sync_ee;	/* resync data being written to disk */
	struct list_head done_ee;	/* finished, need to send acks */
	struct list_head read_ee;	/* peer read requests being served */
	struct list_head net_ee;	/* zero-copy network sends still referenced */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* pages allocated from the page pool */
	atomic_t pp_in_use_by_net;	/* sent pages still referenced by the network stack */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set;	/* communicated number of set bits */
	struct bm_io_work bm_io_work;
	u64 ed_uuid;			/* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex;	/* either own_state_mutex or the connection's cstate_mutex */
	char congestion_reason;		/* why we got congested (for reporting) */
	atomic_t rs_sect_in;		/* incoming resync data rate, as sync target */
	atomic_t rs_sect_ev;		/* submitted resync data rate, both roles */
	int rs_last_sect_ev;		/* counter to compare with */
	int rs_last_events;		/* lower-level device io events when we last looked */

	int c_sync_rate;		/* current resync rate after throttling */
	struct fifo_buffer *rs_plan_s;	/* correction values of the resync planner */
	int rs_in_flight;		/* resync sectors in flight */
	atomic_t ap_in_flight;		/* application sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* requests that would block in drbd_make_request() are deferred
	 * to this single-threaded work queue */
	struct submit_worker submit;
};
980
981struct drbd_bm_aio_ctx {
982 struct drbd_device *device;
983 struct list_head list; ;
984 unsigned long start_jif;
985 atomic_t in_flight;
986 unsigned int done;
987 unsigned flags;
988#define BM_AIO_COPY_PAGES 1
989#define BM_AIO_WRITE_HINTED 2
990#define BM_AIO_WRITE_ALL_PAGES 4
991#define BM_AIO_READ 8
992 int error;
993 struct kref kref;
994};
995
/* Context of one netlink (drbdsetup) administrative request. */
struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)
	/* pointers into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into the reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};
1017
1018static inline struct drbd_device *minor_to_device(unsigned int minor)
1019{
1020 return (struct drbd_device *)idr_find(&drbd_devices, minor);
1021}
1022
1023static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
1024{
1025 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
1026}
1027
1028static inline struct drbd_peer_device *
1029conn_peer_device(struct drbd_connection *connection, int volume_number)
1030{
1031 return idr_find(&connection->peer_devices, volume_number);
1032}
1033
1034#define for_each_resource(resource, _resources) \
1035 list_for_each_entry(resource, _resources, resources)
1036
1037#define for_each_resource_rcu(resource, _resources) \
1038 list_for_each_entry_rcu(resource, _resources, resources)
1039
1040#define for_each_resource_safe(resource, tmp, _resources) \
1041 list_for_each_entry_safe(resource, tmp, _resources, resources)
1042
1043#define for_each_connection(connection, resource) \
1044 list_for_each_entry(connection, &resource->connections, connections)
1045
1046#define for_each_connection_rcu(connection, resource) \
1047 list_for_each_entry_rcu(connection, &resource->connections, connections)
1048
1049#define for_each_connection_safe(connection, tmp, resource) \
1050 list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
1051
1052#define for_each_peer_device(peer_device, device) \
1053 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
1054
1055#define for_each_peer_device_rcu(peer_device, device) \
1056 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
1057
1058#define for_each_peer_device_safe(peer_device, tmp, device) \
1059 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
1060
1061static inline unsigned int device_to_minor(struct drbd_device *device)
1062{
1063 return device->minor;
1064}
1065
1066
1067
1068
1069
1070
1071
1072enum dds_flags {
1073 DDSF_FORCED = 1,
1074 DDSF_NO_RESYNC = 2,
1075};
1076
1077extern void drbd_init_set_defaults(struct drbd_device *device);
1078extern int drbd_thread_start(struct drbd_thread *thi);
1079extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1080#ifdef CONFIG_SMP
1081extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1082#else
1083#define drbd_thread_current_set_cpu(A) ({})
1084#endif
1085extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1086 unsigned int set_size);
1087extern void tl_clear(struct drbd_connection *);
1088extern void drbd_free_sock(struct drbd_connection *connection);
1089extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1090 void *buf, size_t size, unsigned msg_flags);
1091extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1092 unsigned);
1093
1094extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1095extern int drbd_send_protocol(struct drbd_connection *connection);
1096extern int drbd_send_uuids(struct drbd_peer_device *);
1097extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
1098extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
1099extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
1100extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
1101extern int drbd_send_current_state(struct drbd_peer_device *);
1102extern int drbd_send_sync_param(struct drbd_peer_device *);
1103extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1104 u32 set_size);
1105extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1106 struct drbd_peer_request *);
1107extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1108 struct p_block_req *rp);
1109extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
1110 struct p_data *dp, int data_size);
1111extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1112 sector_t sector, int blksize, u64 block_id);
1113extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
1114extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1115 struct drbd_peer_request *);
1116extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
1117extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1118 sector_t sector, int size, u64 block_id);
1119extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1120 int size, void *digest, int digest_size,
1121 enum drbd_packet cmd);
1122extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1123
1124extern int drbd_send_bitmap(struct drbd_device *device);
1125extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1126extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1127extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
1128extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1129extern void drbd_device_cleanup(struct drbd_device *device);
1130extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1131extern void drbd_queue_unplug(struct drbd_device *device);
1132
1133extern void conn_md_sync(struct drbd_connection *connection);
1134extern void drbd_md_write(struct drbd_device *device, void *buffer);
1135extern void drbd_md_sync(struct drbd_device *device);
1136extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1137extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1138extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1139extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1140extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1141extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1142extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1143extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1144extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
1145extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1146extern void drbd_md_mark_dirty(struct drbd_device *device);
1147extern void drbd_queue_bitmap_io(struct drbd_device *device,
1148 int (*io_fn)(struct drbd_device *),
1149 void (*done)(struct drbd_device *, int),
1150 char *why, enum bm_flag flags);
1151extern int drbd_bitmap_io(struct drbd_device *device,
1152 int (*io_fn)(struct drbd_device *),
1153 char *why, enum bm_flag flags);
1154extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1155 int (*io_fn)(struct drbd_device *),
1156 char *why, enum bm_flag flags);
1157extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1158extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193#define MD_128MB_SECT (128LLU << 11)
1194#define MD_4kB_SECT 8
1195#define MD_32kB_SECT 64
1196
1197
1198#define AL_EXTENT_SHIFT 22
1199#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215#define AL_UPDATES_PER_TRANSACTION 64
1216#define AL_CONTEXT_PER_TRANSACTION 919
1217
1218#if BITS_PER_LONG == 32
1219#define LN2_BPL 5
1220#define cpu_to_lel(A) cpu_to_le32(A)
1221#define lel_to_cpu(A) le32_to_cpu(A)
1222#elif BITS_PER_LONG == 64
1223#define LN2_BPL 6
1224#define cpu_to_lel(A) cpu_to_le64(A)
1225#define lel_to_cpu(A) le64_to_cpu(A)
1226#else
1227#error "LN2 of BITS_PER_LONG unknown!"
1228#endif
1229
1230
1231
1232struct bm_extent {
1233 int rs_left;
1234 int rs_failed;
1235 unsigned long flags;
1236 struct lc_element lce;
1237};
1238
1239#define BME_NO_WRITES 0
1240#define BME_LOCKED 1
1241#define BME_PRIORITY 2
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251#define SLEEP_TIME (HZ/10)
1252
1253
1254
1255#define BM_BLOCK_SHIFT 12
1256#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1257
1258
1259
1260#define BM_EXT_SHIFT 24
1261#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1262
1263#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1264#error "HAVE YOU FIXED drbdmeta AS WELL??"
1265#endif
1266
1267
1268#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1269#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1270#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1271
1272
1273#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1274
1275
1276
1277#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1278#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1279
1280
1281#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1282
1283#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1284
1285#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1286
1287#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1288
1289
1290
1291#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1308
1309
1310
1311
1312
1313#define DRBD_MAX_SECTORS_FIXED_BM \
1314 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1315#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
1316
1317#if BITS_PER_LONG == 32
1318
1319
1320
1321#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1322#else
1323
1324#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1325
1326#endif
1327
1328
1329
1330
1331
1332
1333
1334#define DRBD_MAX_BIO_SIZE (1U << 20)
1335#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
1336#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1337#endif
1338#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)
1339
1340#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
1341#define DRBD_MAX_BIO_SIZE_P95 (1U << 17)
1342
1343
1344
1345
1346#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1347#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
1348
1349extern int drbd_bm_init(struct drbd_device *device);
1350extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1351extern void drbd_bm_cleanup(struct drbd_device *device);
1352extern void drbd_bm_set_all(struct drbd_device *device);
1353extern void drbd_bm_clear_all(struct drbd_device *device);
1354
1355extern int drbd_bm_set_bits(
1356 struct drbd_device *device, unsigned long s, unsigned long e);
1357extern int drbd_bm_clear_bits(
1358 struct drbd_device *device, unsigned long s, unsigned long e);
1359extern int drbd_bm_count_bits(
1360 struct drbd_device *device, const unsigned long s, const unsigned long e);
1361
1362
1363extern void _drbd_bm_set_bits(struct drbd_device *device,
1364 const unsigned long s, const unsigned long e);
1365extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1366extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1367extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1368extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1369extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1370extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1371extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1372extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1373extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1374extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1375extern size_t drbd_bm_words(struct drbd_device *device);
1376extern unsigned long drbd_bm_bits(struct drbd_device *device);
1377extern sector_t drbd_bm_capacity(struct drbd_device *device);
1378
1379#define DRBD_END_OF_BITMAP (~(unsigned long)0)
1380extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1381
1382extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1383extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1384extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1385extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1386
1387extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1388 size_t number, unsigned long *buffer);
1389
1390extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1391 size_t number, unsigned long *buffer);
1392
1393extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1394extern void drbd_bm_unlock(struct drbd_device *device);
1395
1396
1397extern struct kmem_cache *drbd_request_cache;
1398extern struct kmem_cache *drbd_ee_cache;
1399extern struct kmem_cache *drbd_bm_ext_cache;
1400extern struct kmem_cache *drbd_al_ext_cache;
1401extern mempool_t drbd_request_mempool;
1402extern mempool_t drbd_ee_mempool;
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417extern struct page *drbd_pp_pool;
1418extern spinlock_t drbd_pp_lock;
1419extern int drbd_pp_vacant;
1420extern wait_queue_head_t drbd_pp_wait;
1421
1422
1423
1424
1425
1426
1427#define DRBD_MIN_POOL_PAGES 128
1428extern mempool_t drbd_md_io_page_pool;
1429
1430
1431
1432extern struct bio_set drbd_md_io_bio_set;
1433
1434extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1435
1436
1437extern struct bio_set drbd_io_bio_set;
1438
1439extern struct mutex resources_mutex;
1440
1441extern int conn_lowest_minor(struct drbd_connection *connection);
1442extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1443extern void drbd_destroy_device(struct kref *kref);
1444extern void drbd_delete_device(struct drbd_device *device);
1445
1446extern struct drbd_resource *drbd_create_resource(const char *name);
1447extern void drbd_free_resource(struct drbd_resource *resource);
1448
1449extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1450extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1451extern void drbd_destroy_connection(struct kref *kref);
1452extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1453 void *peer_addr, int peer_addr_len);
1454extern struct drbd_resource *drbd_find_resource(const char *name);
1455extern void drbd_destroy_resource(struct kref *kref);
1456extern void conn_free_crypto(struct drbd_connection *connection);
1457
1458
1459extern void do_submit(struct work_struct *ws);
1460extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1461extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1462extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1463extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1464
1465
1466
1467
1468extern struct mutex notification_mutex;
1469
1470extern void drbd_suspend_io(struct drbd_device *device);
1471extern void drbd_resume_io(struct drbd_device *device);
1472extern char *ppsize(char *buf, unsigned long long size);
1473extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1474enum determine_dev_size {
1475 DS_ERROR_SHRINK = -3,
1476 DS_ERROR_SPACE_MD = -2,
1477 DS_ERROR = -1,
1478 DS_UNCHANGED = 0,
1479 DS_SHRUNK = 1,
1480 DS_GREW = 2,
1481 DS_GREW_FROM_ZERO = 3,
1482};
1483extern enum determine_dev_size
1484drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1485extern void resync_after_online_grow(struct drbd_device *);
1486extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1487 struct drbd_backing_dev *bdev, struct o_qlim *o);
1488extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1489 enum drbd_role new_role,
1490 int force);
1491extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1492extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1493extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1494extern int drbd_khelper(struct drbd_device *device, char *cmd);
1495
1496
1497
1498extern void drbd_md_endio(struct bio *bio);
1499extern void drbd_peer_request_endio(struct bio *bio);
1500extern void drbd_request_endio(struct bio *bio);
1501extern int drbd_worker(struct drbd_thread *thi);
1502enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1503void drbd_resync_after_changed(struct drbd_device *device);
1504extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1505extern void resume_next_sg(struct drbd_device *device);
1506extern void suspend_other_sg(struct drbd_device *device);
1507extern int drbd_resync_finished(struct drbd_device *device);
1508
1509extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1510extern void drbd_md_put_buffer(struct drbd_device *device);
1511extern int drbd_md_sync_page_io(struct drbd_device *device,
1512 struct drbd_backing_dev *bdev, sector_t sector, int op);
1513extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1514extern void wait_until_done_or_force_detached(struct drbd_device *device,
1515 struct drbd_backing_dev *bdev, unsigned int *done);
1516extern void drbd_rs_controller_reset(struct drbd_device *device);
1517
1518static inline void ov_out_of_sync_print(struct drbd_device *device)
1519{
1520 if (device->ov_last_oos_size) {
1521 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1522 (unsigned long long)device->ov_last_oos_start,
1523 (unsigned long)device->ov_last_oos_size);
1524 }
1525 device->ov_last_oos_size = 0;
1526}
1527
1528
1529extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
1530extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
1531
1532extern int w_e_end_data_req(struct drbd_work *, int);
1533extern int w_e_end_rsdata_req(struct drbd_work *, int);
1534extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1535extern int w_e_end_ov_reply(struct drbd_work *, int);
1536extern int w_e_end_ov_req(struct drbd_work *, int);
1537extern int w_ov_finished(struct drbd_work *, int);
1538extern int w_resync_timer(struct drbd_work *, int);
1539extern int w_send_write_hint(struct drbd_work *, int);
1540extern int w_send_dblock(struct drbd_work *, int);
1541extern int w_send_read_req(struct drbd_work *, int);
1542extern int w_e_reissue(struct drbd_work *, int);
1543extern int w_restart_disk_io(struct drbd_work *, int);
1544extern int w_send_out_of_sync(struct drbd_work *, int);
1545extern int w_start_resync(struct drbd_work *, int);
1546
1547extern void resync_timer_fn(struct timer_list *t);
1548extern void start_resync_timer_fn(struct timer_list *t);
1549
1550extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1551
1552
1553extern int drbd_receiver(struct drbd_thread *thi);
1554extern int drbd_ack_receiver(struct drbd_thread *thi);
1555extern void drbd_send_ping_wf(struct work_struct *ws);
1556extern void drbd_send_acks_wf(struct work_struct *ws);
1557extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1558extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1559 bool throttle_if_app_is_waiting);
1560extern int drbd_submit_peer_request(struct drbd_device *,
1561 struct drbd_peer_request *, const unsigned,
1562 const unsigned, const int);
1563extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1564extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1565 sector_t, unsigned int,
1566 unsigned int,
1567 gfp_t) __must_hold(local);
1568extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1569 int);
1570#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1571#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1572extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1573extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1574extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1575extern int drbd_connected(struct drbd_peer_device *);
1576
1577static inline void drbd_tcp_cork(struct socket *sock)
1578{
1579 int val = 1;
1580 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1581 (char*)&val, sizeof(val));
1582}
1583
1584static inline void drbd_tcp_uncork(struct socket *sock)
1585{
1586 int val = 0;
1587 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1588 (char*)&val, sizeof(val));
1589}
1590
1591static inline void drbd_tcp_quickack(struct socket *sock)
1592{
1593 int val = 2;
1594 (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1595 (char*)&val, sizeof(val));
1596}
1597
1598
/* Publish a new capacity (@size in 512-byte sectors): update the
 * gendisk and mirror it into the block device inode size in bytes. */
static inline void drbd_set_my_capacity(struct drbd_device *device,
					sector_t size)
{
	/* NOTE(review): i_size is written without bd_mutex here --
	 * presumably serialized by the caller; confirm. */
	set_capacity(device->vdisk, size);
	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}
1606
1607
1608
1609
/* Submit @bio to the lower-level device, or fail it immediately when
 * fault injection of @fault_type fires.  The sparse __release(local)
 * balances the local-device context annotation taken by the caller. */
static inline void drbd_generic_make_request(struct drbd_device *device,
					     int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_disk) {
		/* should not happen; complete the bio with an I/O error
		 * instead of crashing in generic_make_request() */
		drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		generic_make_request(bio);
}
1626
1627void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1628 enum write_ordering_e wo);
1629
1630
1631extern struct proc_dir_entry *drbd_proc;
1632int drbd_seq_show(struct seq_file *seq, void *v);
1633
1634
1635extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1636extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1637extern void drbd_al_begin_io_commit(struct drbd_device *device);
1638extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1639extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1640extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1641extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1642extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1643extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1644extern void drbd_rs_cancel_all(struct drbd_device *device);
1645extern int drbd_rs_del_all(struct drbd_device *device);
1646extern void drbd_rs_failed_io(struct drbd_device *device,
1647 sector_t sector, int size);
1648extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1649
/* One common implementation behind "set in sync" / "set out of sync" /
 * "record resync failure"; the mode selects the bitmap operation. */
enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
/* NOTE(review): this macro intentionally shadows the drbd_rs_failed_io()
 * prototype declared a few lines above. */
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
1659extern void drbd_al_shrink(struct drbd_device *device);
1660extern int drbd_al_initialize(struct drbd_device *, void *);
1661
1662
1663
/* Payload of a state-info broadcast (see drbd_bcast_event()); which
 * union member is valid is selected by sib_reason. */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		/* presumably for helper-related broadcast reasons */
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		/* presumably for state-change broadcasts: old/new state */
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
1677void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1678
1679extern void notify_resource_state(struct sk_buff *,
1680 unsigned int,
1681 struct drbd_resource *,
1682 struct resource_info *,
1683 enum drbd_notification_type);
1684extern void notify_device_state(struct sk_buff *,
1685 unsigned int,
1686 struct drbd_device *,
1687 struct device_info *,
1688 enum drbd_notification_type);
1689extern void notify_connection_state(struct sk_buff *,
1690 unsigned int,
1691 struct drbd_connection *,
1692 struct connection_info *,
1693 enum drbd_notification_type);
1694extern void notify_peer_device_state(struct sk_buff *,
1695 unsigned int,
1696 struct drbd_peer_device *,
1697 struct peer_device_info *,
1698 enum drbd_notification_type);
1699extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1700 struct drbd_connection *, const char *, int);
1701
1702
1703
1704
1705
1706
/* DRBD links data pages into chains through page->private; return the
 * next page of such a chain (NULL terminates the chain). */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
/* Iterate a page chain; prefetches the next link each step. */
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
/* As above, but safe against unlinking the current page. */
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1716
1717
1718static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1719{
1720 struct page *page = peer_req->pages;
1721 page_chain_for_each(page) {
1722 if (page_count(page) > 1)
1723 return 1;
1724 }
1725 return 0;
1726}
1727
1728static inline union drbd_state drbd_read_state(struct drbd_device *device)
1729{
1730 struct drbd_resource *resource = device->resource;
1731 union drbd_state rv;
1732
1733 rv.i = device->state.i;
1734 rv.susp = resource->susp;
1735 rv.susp_nod = resource->susp_nod;
1736 rv.susp_fen = resource->susp_fen;
1737
1738 return rv;
1739}
1740
/* Kind of local I/O failure reported to __drbd_chk_io_error_():
 * read/write errors may be "passed on" per configuration, while
 * meta-data errors and forced detaches always detach the disk. */
enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};
1747
/* Apply the configured on-io-error policy to a local I/O failure of
 * kind @df.  Must be called with the resource req_lock held (see
 * drbd_chk_io_error_() below). */
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
	enum drbd_force_detach_flags df,
	const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON:
		/* "pass on" only applies to data read/write errors: mark
		 * the disk inconsistent but keep it attached */
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* fall through - DRBD_META_IO_ERROR and DRBD_FORCE_DETACH
		 * always detach, regardless of policy */
	case EP_DETACH:
	case EP_CALL_HELPER:
		/* Remember that an I/O error happened, and which kind;
		 * the detach/state handling code looks at these flags. */
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			/* CS_HARD: force disk state to D_FAILED, which
			 * starts the actual detach process */
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}
1803
1804
1805
1806
1807
1808
1809
1810
1811
/* Entry point for completed local I/O: if @error is set, take the
 * req_lock and run the on-io-error policy (__drbd_chk_io_error_()). */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}
1823
1824
1825
1826
1827
1828
1829
1830
1831
/* First sector of the meta data area on @bdev.  For internal meta
 * data the bitmap part is located relative to md_offset via bm_offset
 * (presumably negative, i.e. in front of the superblock -- confirm
 * against the meta data layout); otherwise the area starts at
 * md_offset itself. */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/* Last sector of the meta data area on @bdev. */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* internal: superblock (4k) is the last component */
		return bdev->md.md_offset + MD_4kB_SECT -1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect -1;
	}
}
1859
1860
1861static inline sector_t drbd_get_capacity(struct block_device *bdev)
1862{
1863
1864 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1865}
1866
1867
1868
1869
1870
1871
1872
1873
1874
/* Maximum number of data sectors usable on @bdev, depending on where
 * its meta data lives:
 * - internal: data may extend up to the start of the meta data area,
 *   capped at DRBD_MAX_SECTORS_FLEX;
 * - flexible external: capped by DRBD_MAX_SECTORS_FLEX and by how much
 *   the external bitmap (md_size_sect - bm_offset) can cover;
 * - fixed external index: capped at DRBD_MAX_SECTORS. */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* 0 when there is no backing device capacity at all */
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* additionally limited by the external bitmap size */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}
1901
1902
1903
1904
1905
1906static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1907{
1908 const int meta_dev_idx = bdev->md.meta_dev_idx;
1909
1910 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1911 return 0;
1912
1913
1914
1915 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1916 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1917 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1918
1919
1920 return MD_128MB_SECT * bdev->md.meta_dev_idx;
1921}
1922
/* Append work item @w to queue @q and wake one waiter.  Safe from any
 * context (irqsave locking). */
static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

/* Like drbd_queue_work(), but only queues @w if it is not already on
 * some list (list_empty_careful()); always issues the wake_up. */
static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

/* Post per-device work encoded as a flag bit: set @work_bit in
 * device->flags, flag the connection with DEVICE_WORK_PENDING, and
 * wake the sender work queue so the worker picks the bit up. */
static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}
1955
1956extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1957
1958
1959
1960
1961
/* Kick the ack receiver thread out of its blocking network receive by
 * signalling SIGXCPU -- only while the thread is actually RUNNING. */
static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		force_sig(SIGXCPU, task);
}

/* Ask the ack receiver to send a ping to the peer: set SEND_PING and
 * wake the thread so it notices. */
static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}
1974
1975extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1976extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1977extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1978 enum drbd_packet, unsigned int, void *,
1979 unsigned int);
1980extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1981 enum drbd_packet, unsigned int, void *,
1982 unsigned int);
1983
1984extern int drbd_send_ping(struct drbd_connection *connection);
1985extern int drbd_send_ping_ack(struct drbd_connection *connection);
1986extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1987extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1988
/* Convenience wrappers around _drbd_thread_stop(thi, restart, wait)
 * (argument meaning inferred from the wrapper names -- confirm against
 * the _drbd_thread_stop() definition). */

/* Stop @thi and wait for it to terminate. */
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

/* Request @thi to stop; do not wait. */
static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

/* Request @thi to restart; do not wait. */
static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
/* Per-device in-flight counters.  Every inc_*() has a matching dec_*()
 * which logs (with the caller's function/line) if the counter ever
 * drops below zero -- that indicates unbalanced accounting. */

static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

/* Complain if a counter went negative; @which is the struct member. */
#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	/* waiters in misc_wait may be waiting for ap_pending to drain */
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

/* rs_pending_cnt: resync-related requests/acks in flight (presumably;
 * see the inc/dec call sites). */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

/* unacked_cnt: peer requests we still owe an ack for (presumably;
 * see the inc/dec call sites in the receiver/worker). */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

/* As _dec_unacked(), but subtract @n at once. */
#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
2090
2091static inline bool is_sync_target_state(enum drbd_conns connection_state)
2092{
2093 return connection_state == C_SYNC_TARGET ||
2094 connection_state == C_PAUSED_SYNC_T;
2095}
2096
2097static inline bool is_sync_source_state(enum drbd_conns connection_state)
2098{
2099 return connection_state == C_SYNC_SOURCE ||
2100 connection_state == C_PAUSED_SYNC_S;
2101}
2102
2103static inline bool is_sync_state(enum drbd_conns connection_state)
2104{
2105 return is_sync_source_state(connection_state) ||
2106 is_sync_target_state(connection_state);
2107}
2108
2109
2110
2111
2112
2113
2114
2115
/* Take a reference on the local backing device, but only if the disk
 * state is at least @_min_state; evaluates to true on success.  The
 * __acquire() keeps sparse's context tracking balanced with the
 * __release(local) in put_ldev(). */
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

/* Drop a reference taken via get_ldev*().  When the last reference is
 * gone, post the disk-teardown work matching the disk state. */
static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	/* NOTE: the disk state is sampled *before* the decrement; once
	 * local_cnt reaches zero the state may change under us, so a
	 * read after the decrement would be racy. */
	int i = atomic_dec_return(&device->local_cnt);

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			/* even internal references are gone */
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			/* all application I/O references are gone */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
/* Take a local-device reference if the disk state is >= @mins.
 * Returns non-zero with the reference held, 0 otherwise. */
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
2165
2166
2167
2168
/* Current max_buffers setting from the RCU-protected net_conf;
 * effectively unlimited while no network configuration exists. */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000; /* arbitrary "unlimited" */
	rcu_read_unlock();

	return mxb;
}
2181
/* Is the current state "stable", i.e. may new application I/O start?
 * Used by may_inc_ap_bio().  NOTE(review): neither switch has a
 * default clause -- presumably deliberate, so the compiler warns when
 * a new connection/disk state is added but not classified here. */
static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	switch ((enum drbd_conns)s.conn) {
	/* New I/O is fine while not configured / not connected ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... while connected or resyncing ... */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
	/* ... and during connection teardown / re-establishment. */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	/* While sending the bitmap, new I/O is only allowed with
	 * protocol version >= 96. */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

	/* No new application I/O in these states. */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	/* These disk states are stable. */
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		break;

	/* No new application I/O while attaching/negotiating the disk. */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		return 0;
	}

	return 1;
}
2251
2252static inline int drbd_suspended(struct drbd_device *device)
2253{
2254 struct drbd_resource *resource = device->resource;
2255
2256 return resource->susp || resource->susp_fen || resource->susp_nod;
2257}
2258
/* May a new application bio be started right now?  Callers loop on
 * this via inc_ap_bio(); must hold the req_lock for an authoritative
 * answer (see inc_ap_bio_cond()). */
static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	/* only allow new application I/O in "stable" states; starting
	 * it during an unstable state transition would be problematic */
	if (!drbd_state_is_stable(device))
		return false;

	/* throttle the number of in-flight application bios to the
	 * configured max_buffers */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	/* no new application I/O while bitmap I/O is pending */
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}
2284
/* Under the req_lock: re-check may_inc_ap_bio() and, if allowed,
 * account one more application bio.  Returns whether it was allowed. */
static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

/* Block until a new application bio may start, then account it.
 * Paired with dec_ap_bio() when the bio completes. */
static inline void inc_ap_bio(struct drbd_device *device)
{
	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

/* Account completion of one application bio: once the count reaches
 * zero, queue any bitmap I/O that was waiting for quiescence, and wake
 * writers throttled on max_buffers. */
static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	/* NOTE(review): this wakes misc_wait on nearly every completion
	 * (whenever ap_bio < mxb); some hysteresis might reduce
	 * wakeups, but that would be a behavior change. */
	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}
2331
/* Online-verify "stop sector" support: protocol >= 97, except 100
 * (NOTE(review): the explicit != 100 exception presumably works around
 * a defect in that protocol version -- confirm against DRBD history). */
static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

/* Set the exposed-data UUID; returns non-zero iff the value changed. */
static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

/* Queue ordering mode reported for this device: always "none" --
 * DRBD provides no ordering guarantees by itself. */
static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* define the fallback locally in case the block layer no longer
	 * provides this constant */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

/* First connection of @resource, or NULL if there is none. */
static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
		struct drbd_connection, connections);
}
2360
2361#endif
2362