/*
  drbd_int.h

  Shared internal declarations for the DRBD block driver.
*/
26#ifndef _DRBD_INT_H
27#define _DRBD_INT_H
28
29#include <crypto/hash.h>
30#include <linux/compiler.h>
31#include <linux/types.h>
32#include <linux/list.h>
33#include <linux/sched/signal.h>
34#include <linux/bitops.h>
35#include <linux/slab.h>
36#include <linux/ratelimit.h>
37#include <linux/tcp.h>
38#include <linux/mutex.h>
39#include <linux/major.h>
40#include <linux/blkdev.h>
41#include <linux/backing-dev.h>
42#include <linux/genhd.h>
43#include <linux/idr.h>
44#include <linux/dynamic_debug.h>
45#include <net/tcp.h>
46#include <linux/lru_cache.h>
47#include <linux/prefetch.h>
48#include <linux/drbd_genl_api.h>
49#include <linux/drbd.h>
50#include "drbd_strings.h"
51#include "drbd_state.h"
52#include "drbd_protocol.h"
53
54#ifdef __CHECKER__
55# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
56# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
57# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
58#else
59# define __protected_by(x)
60# define __protected_read_by(x)
61# define __protected_write_by(x)
62#endif
63
64
65#ifdef CONFIG_DRBD_FAULT_INJECTION
66extern int drbd_enable_faults;
67extern int drbd_fault_rate;
68#endif
69
70extern unsigned int drbd_minor_count;
71extern char drbd_usermode_helper[];
72extern int drbd_proc_details;

/* signal used to ask our kernel threads to stop */
80#define DRBD_SIGKILL SIGHUP
81
82#define ID_IN_SYNC (4711ULL)
83#define ID_OUT_OF_SYNC (4712ULL)
84#define ID_SYNCER (-1ULL)
85
86#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
87
88struct drbd_device;
89struct drbd_connection;
90
91#define __drbd_printk_device(level, device, fmt, args...) \
92 dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
93#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
94 dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
95#define __drbd_printk_resource(level, resource, fmt, args...) \
96 printk(level "drbd %s: " fmt, (resource)->name, ## args)
97#define __drbd_printk_connection(level, connection, fmt, args...) \
98 printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
99
100void drbd_printk_with_wrong_object_type(void);
101
102#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
103 (__builtin_types_compatible_p(typeof(obj), type) || \
104 __builtin_types_compatible_p(typeof(obj), const type)), \
105 func(level, (const type)(obj), fmt, ## args)
106
107#define drbd_printk(level, obj, fmt, args...) \
108 __builtin_choose_expr( \
109 __drbd_printk_if_same_type(obj, struct drbd_device *, \
110 __drbd_printk_device, level, fmt, ## args), \
111 __builtin_choose_expr( \
112 __drbd_printk_if_same_type(obj, struct drbd_resource *, \
113 __drbd_printk_resource, level, fmt, ## args), \
114 __builtin_choose_expr( \
115 __drbd_printk_if_same_type(obj, struct drbd_connection *, \
116 __drbd_printk_connection, level, fmt, ## args), \
117 __builtin_choose_expr( \
118 __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
119 __drbd_printk_peer_device, level, fmt, ## args), \
120 drbd_printk_with_wrong_object_type()))))
121
122#define drbd_dbg(obj, fmt, args...) \
123 drbd_printk(KERN_DEBUG, obj, fmt, ## args)
124#define drbd_alert(obj, fmt, args...) \
125 drbd_printk(KERN_ALERT, obj, fmt, ## args)
126#define drbd_err(obj, fmt, args...) \
127 drbd_printk(KERN_ERR, obj, fmt, ## args)
128#define drbd_warn(obj, fmt, args...) \
129 drbd_printk(KERN_WARNING, obj, fmt, ## args)
130#define drbd_info(obj, fmt, args...) \
131 drbd_printk(KERN_INFO, obj, fmt, ## args)
132#define drbd_emerg(obj, fmt, args...) \
133 drbd_printk(KERN_EMERG, obj, fmt, ## args)
134
135#define dynamic_drbd_dbg(device, fmt, args...) \
136 dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
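
/*
 * Usage sketch for the drbd_printk() family: the first argument selects the
 * log prefix by its type (device, resource, connection or peer_device); the
 * variable names below just stand for whatever object is in scope at the
 * call site.
 *
 *	drbd_warn(device, "read error on sector %llu\n",
 *		  (unsigned long long)sector);
 *	drbd_info(connection, "handshake done, using protocol %d\n",
 *		  connection->agreed_pro_version);
 */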
137
138#define D_ASSERT(device, exp) do { \
139 if (!(exp)) \
140 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
141 } while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike D_ASSERT(), this returns the tested value, so the caller can react
 * to a failure. It relies on a local variable named "device" for logging.
 */
148#define expect(exp) ({ \
149 bool _bool = (exp); \
150 if (!_bool) \
151 drbd_err(device, "ASSERTION %s FAILED in %s\n", \
152 #exp, __func__); \
153 _bool; \
154 })
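
/*
 * Sketch of how the two assertions differ: D_ASSERT() only logs, while
 * expect() also yields the tested value, so it can guard a fallback path.
 * "device" and "size" below are whatever happens to be in scope.
 *
 *	D_ASSERT(device, list_empty(&device->read_ee));
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		size = DRBD_MAX_BIO_SIZE;
 */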
155
156
157enum {
158 DRBD_FAULT_MD_WR = 0,
159 DRBD_FAULT_MD_RD = 1,
160 DRBD_FAULT_RS_WR = 2,
161 DRBD_FAULT_RS_RD = 3,
162 DRBD_FAULT_DT_WR = 4,
163 DRBD_FAULT_DT_RD = 5,
164 DRBD_FAULT_DT_RA = 6,
165 DRBD_FAULT_BM_ALLOC = 7,
166 DRBD_FAULT_AL_EE = 8,
167 DRBD_FAULT_RECEIVE = 9,
168
169 DRBD_FAULT_MAX,
170};
171
172extern unsigned int
173_drbd_insert_fault(struct drbd_device *device, unsigned int type);
174
175static inline int
176drbd_insert_fault(struct drbd_device *device, unsigned int type) {
177#ifdef CONFIG_DRBD_FAULT_INJECTION
178 return drbd_fault_rate &&
179 (drbd_enable_faults & (1<<type)) &&
180 _drbd_insert_fault(device, type);
181#else
182 return 0;
183#endif
184}
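
/*
 * Fault injection sketch: drbd_insert_fault() is a no-op unless
 * CONFIG_DRBD_FAULT_INJECTION is enabled and the type's bit is set in
 * drbd_enable_faults. A caller typically fails the I/O instead of
 * submitting it, e.g. (illustrative only):
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *		bio_io_error(bio);
 *	else
 *		generic_make_request(bio);
 */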
185
/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division: round down */
#define div_floor(A, B) ((A)/(B))
190
191extern struct ratelimit_state drbd_ratelimit_state;
192extern struct idr drbd_devices;
193extern struct list_head drbd_resources;
194
195extern const char *cmdname(enum drbd_packet cmd);
196
197
198
199struct bm_xfer_ctx {
200
201
202
203
204 unsigned long bm_bits;
205 unsigned long bm_words;
206
207 unsigned long bit_offset;
208 unsigned long word_offset;
209
210
211 unsigned packets[2];
212 unsigned bytes[2];
213};
214
215extern void INFO_bm_xfer_stats(struct drbd_device *device,
216 const char *direction, struct bm_xfer_ctx *c);
217
218static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
219{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * but is always aligned to a 64 bit boundary: the bitmap is
	 * exchanged in 64 bit chunks on the wire, independent of the
	 * host's BITS_PER_LONG. */
227#if BITS_PER_LONG == 64
228 c->word_offset = c->bit_offset >> 6;
229#elif BITS_PER_LONG == 32
230 c->word_offset = c->bit_offset >> 5;
231 c->word_offset &= ~(1UL);
232#else
233# error "unsupported BITS_PER_LONG"
234#endif
235}
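
/*
 * Worked example: for bit_offset == 100000, a 64 bit host gets
 * word_offset = 100000 >> 6 = 1562 (bit 99968); a 32 bit host gets
 * 100000 >> 5 = 3125, rounded down to the even value 3124, which is the
 * same bit position (3124 * 32 == 1562 * 64 == 99968), so both sides agree
 * on 64 bit aligned words.
 */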
236
237extern unsigned int drbd_header_size(struct drbd_connection *connection);
238
239
240enum drbd_thread_state {
241 NONE,
242 RUNNING,
243 EXITING,
244 RESTARTING
245};
246
247struct drbd_thread {
248 spinlock_t t_lock;
249 struct task_struct *task;
250 struct completion stop;
251 enum drbd_thread_state t_state;
252 int (*function) (struct drbd_thread *);
253 struct drbd_resource *resource;
254 struct drbd_connection *connection;
255 int reset_cpu_mask;
256 const char *name;
257};
258
259static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
260{
261
262
263
264
265 smp_rmb();
266 return thi->t_state;
267}
268
269struct drbd_work {
270 struct list_head list;
271 int (*cb)(struct drbd_work *, int cancel);
272};
273
274struct drbd_device_work {
275 struct drbd_work w;
276 struct drbd_device *device;
277};
278
279#include "drbd_interval.h"
280
281extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
282
283extern void lock_all_resources(void);
284extern void unlock_all_resources(void);
285
286struct drbd_request {
287 struct drbd_work w;
288 struct drbd_device *device;
289
290
291
292
293
294 struct bio *private_bio;
295
296 struct drbd_interval i;
297
298
299
300
301
302
303
304
305
306 unsigned int epoch;
307
308 struct list_head tl_requests;
309 struct bio *master_bio;
310
311
312 struct list_head req_pending_master_completion;
313 struct list_head req_pending_local;
314
315
316 unsigned long start_jif;
317
318
319
320
321
322
323
324
325
326 unsigned long in_actlog_jif;
327
328
329 unsigned long pre_submit_jif;
330
331
332 unsigned long pre_send_jif;
333 unsigned long acked_jif;
334 unsigned long net_done_jif;

	/* once it hits 0, we may complete the master_bio */
369 atomic_t completion_ref;
370
371 struct kref kref;
372
373 unsigned rq_state;
374};
375
376struct drbd_epoch {
377 struct drbd_connection *connection;
378 struct list_head list;
379 unsigned int barrier_nr;
380 atomic_t epoch_size;
381 atomic_t active;
382 unsigned long flags;
383};
384
385
386int drbdd_init(struct drbd_thread *);
387int drbd_asender(struct drbd_thread *);
388
389
390enum {
391 DE_HAVE_BARRIER_NUMBER,
392};
393
394enum epoch_event {
395 EV_PUT,
396 EV_GOT_BARRIER_NR,
397 EV_BECAME_LAST,
398 EV_CLEANUP = 32,
399};
400
401struct digest_info {
402 int digest_size;
403 void *digest;
404};
405
406struct drbd_peer_request {
407 struct drbd_work w;
408 struct drbd_peer_device *peer_device;
409 struct drbd_epoch *epoch;
410 struct page *pages;
411 atomic_t pending_bios;
412 struct drbd_interval i;
413
414 unsigned long flags;
415 unsigned long submit_jif;
416 union {
417 u64 block_id;
418 struct digest_info *digest;
419 };
420};
421
422
423
424
425
426
427
428enum {
429 __EE_CALL_AL_COMPLETE_IO,
430 __EE_MAY_SET_IN_SYNC,
431
432
433 __EE_IS_TRIM,
434
435
436
437 __EE_RESUBMITTED,
438
439
440
441
442 __EE_WAS_ERROR,
443
444
445 __EE_HAS_DIGEST,
446
447
448 __EE_RESTART_REQUESTS,
449
450
451 __EE_SEND_WRITE_ACK,
452
453
454 __EE_IN_INTERVAL_TREE,
455
456
457
458 __EE_SUBMITTED,
459
460
461 __EE_WRITE,
462
463
464 __EE_WRITE_SAME,
465
466
467
468 __EE_APPLICATION,
469
470
471 __EE_RS_THIN_REQ,
472};
473#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
474#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
475#define EE_IS_TRIM (1<<__EE_IS_TRIM)
476#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
477#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
478#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
479#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
480#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
481#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
482#define EE_SUBMITTED (1<<__EE_SUBMITTED)
483#define EE_WRITE (1<<__EE_WRITE)
484#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
485#define EE_APPLICATION (1<<__EE_APPLICATION)
486#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
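
/*
 * The __EE_* enum gives bit numbers for atomic bitops, the EE_* masks are
 * for plain tests on peer_req->flags. Illustrative use:
 *
 *	if (peer_req->flags & EE_WAS_ERROR)
 *		drbd_err(device, "peer request failed\n");
 *	set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags);
 */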
487
488
489enum {
490 UNPLUG_REMOTE,
491 MD_DIRTY,
492 USE_DEGR_WFC_T,
493 CL_ST_CHG_SUCCESS,
494 CL_ST_CHG_FAIL,
495 CRASHED_PRIMARY,
496
497
498 CONSIDER_RESYNC,
499
500 MD_NO_FUA,
501
502 BITMAP_IO,
503
504 BITMAP_IO_QUEUED,
505 WAS_IO_ERROR,
506 WAS_READ_ERROR,
507 FORCE_DETACH,
508 RESYNC_AFTER_NEG,
509 RESIZE_PENDING,
510
511 NEW_CUR_UUID,
512 AL_SUSPENDED,
513 AHEAD_TO_SYNC_SOURCE,
514 B_RS_H_DONE,
515 DISCARD_MY_DATA,
516 READ_BALANCE_RR,
517
518 FLUSH_PENDING,
519
520
521
522 GOING_DISKLESS,
523
524
525 GO_DISKLESS,
526 DESTROY_DISK,
527 MD_SYNC,
528 RS_START,
529 RS_PROGRESS,
530 RS_DONE,
531};
532
533struct drbd_bitmap;
534
535
536
537enum bm_flag {
538
539 BM_LOCKED_MASK = 0xf,
540
541
542 BM_DONT_CLEAR = 0x1,
543 BM_DONT_SET = 0x2,
544 BM_DONT_TEST = 0x4,
545
546
547
548 BM_IS_LOCKED = 0x8,
549
550
551 BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
552
553
554
555
556 BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
557
558
559
560 BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
561};
562
563struct drbd_work_queue {
564 struct list_head q;
565 spinlock_t q_lock;
566 wait_queue_head_t q_wait;
567};
568
569struct drbd_socket {
570 struct mutex mutex;
571 struct socket *socket;
572
573
574 void *sbuf;
575 void *rbuf;
576};
577
578struct drbd_md {
579 u64 md_offset;
580
581 u64 la_size_sect;
582 spinlock_t uuid_lock;
583 u64 uuid[UI_SIZE];
584 u64 device_uuid;
585 u32 flags;
586 u32 md_size_sect;
587
588 s32 al_offset;
589 s32 bm_offset;
590
591
592 s32 meta_dev_idx;
593
594
595 u32 al_stripes;
596 u32 al_stripe_size_4k;
597 u32 al_size_4k;
598};
599
600struct drbd_backing_dev {
601 struct block_device *backing_bdev;
602 struct block_device *md_bdev;
603 struct drbd_md md;
604 struct disk_conf *disk_conf;
605 sector_t known_size;
606};
607
608struct drbd_md_io {
609 struct page *page;
610 unsigned long start_jif;
611 unsigned long submit_jif;
612 const char *current_use;
613 atomic_t in_use;
614 unsigned int done;
615 int error;
616};
617
618struct bm_io_work {
619 struct drbd_work w;
620 char *why;
621 enum bm_flag flags;
622 int (*io_fn)(struct drbd_device *device);
623 void (*done)(struct drbd_device *device, int rv);
624};
625
626struct fifo_buffer {
627 unsigned int head_index;
628 unsigned int size;
629 int total;
630 int values[0];
631};
632extern struct fifo_buffer *fifo_alloc(int fifo_size);
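
/*
 * fifo_buffer is a small ring of "size" ints, used for the resync rate
 * controller plan (rs_plan_s). Sketch, assuming fifo_alloc() returns a
 * freshly allocated buffer of fifo_size entries or NULL:
 *
 *	struct fifo_buffer *plan = fifo_alloc(64);
 *	if (plan)
 *		plan->values[plan->head_index] = 0;
 */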
633
634
635enum {
636 NET_CONGESTED,
637 RESOLVE_CONFLICTS,
638 SEND_PING,
639 GOT_PING_ACK,
640 CONN_WD_ST_CHG_REQ,
641 CONN_WD_ST_CHG_OKAY,
642 CONN_WD_ST_CHG_FAIL,
643 CONN_DRY_RUN,
644 CREATE_BARRIER,
645 STATE_SENT,
646 CALLBACK_PENDING,
647
648
649
650
651
652 DISCONNECT_SENT,
653
654 DEVICE_WORK_PENDING,
655};
656
657enum which_state { NOW, OLD = NOW, NEW };
658
659struct drbd_resource {
660 char *name;
661#ifdef CONFIG_DEBUG_FS
662 struct dentry *debugfs_res;
663 struct dentry *debugfs_res_volumes;
664 struct dentry *debugfs_res_connections;
665 struct dentry *debugfs_res_in_flight_summary;
666#endif
667 struct kref kref;
668 struct idr devices;
669 struct list_head connections;
670 struct list_head resources;
671 struct res_opts res_opts;
672 struct mutex conf_update;
673 struct mutex adm_mutex;
674 spinlock_t req_lock;
675
676 unsigned susp:1;
677 unsigned susp_nod:1;
678 unsigned susp_fen:1;
679
680 enum write_ordering_e write_ordering;
681
682 cpumask_var_t cpu_mask;
683};
684
685struct drbd_thread_timing_details
686{
687 unsigned long start_jif;
688 void *cb_addr;
689 const char *caller_fn;
690 unsigned int line;
691 unsigned int cb_nr;
692};
693
694struct drbd_connection {
695 struct list_head connections;
696 struct drbd_resource *resource;
697#ifdef CONFIG_DEBUG_FS
698 struct dentry *debugfs_conn;
699 struct dentry *debugfs_conn_callback_history;
700 struct dentry *debugfs_conn_oldest_requests;
701#endif
702 struct kref kref;
703 struct idr peer_devices;
704 enum drbd_conns cstate;
705 struct mutex cstate_mutex;
706 unsigned int connect_cnt;
707
708 unsigned long flags;
709 struct net_conf *net_conf;
710 wait_queue_head_t ping_wait;
711
712 struct sockaddr_storage my_addr;
713 int my_addr_len;
714 struct sockaddr_storage peer_addr;
715 int peer_addr_len;
716
717 struct drbd_socket data;
718 struct drbd_socket meta;
719 int agreed_pro_version;
720 u32 agreed_features;
721 unsigned long last_received;
722 unsigned int ko_count;
723
724 struct list_head transfer_log;
725
726 struct crypto_shash *cram_hmac_tfm;
727 struct crypto_ahash *integrity_tfm;
728 struct crypto_ahash *peer_integrity_tfm;
729 struct crypto_ahash *csums_tfm;
730 struct crypto_ahash *verify_tfm;
731 void *int_dig_in;
732 void *int_dig_vv;
733
734
735 struct drbd_epoch *current_epoch;
736 spinlock_t epoch_lock;
737 unsigned int epochs;
738 atomic_t current_tle_nr;
739 unsigned current_tle_writes;
740
741 unsigned long last_reconnect_jif;
742
743 struct blk_plug receiver_plug;
744 struct drbd_thread receiver;
745 struct drbd_thread worker;
746 struct drbd_thread ack_receiver;
747 struct workqueue_struct *ack_sender;
748
749
750
751
752 struct drbd_request *req_next;
753 struct drbd_request *req_ack_pending;
754 struct drbd_request *req_not_net_done;
755
756
757 struct drbd_work_queue sender_work;
758
759#define DRBD_THREAD_DETAILS_HIST 16
760 unsigned int w_cb_nr;
761 unsigned int r_cb_nr;
762 struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
763 struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];
764
765 struct {
766 unsigned long last_sent_barrier_jif;
767
768
769
770 bool seen_any_write_yet;
771
772
773 int current_epoch_nr;
774
775
776
777
778 unsigned current_epoch_writes;
779 } send;
780};
781
782static inline bool has_net_conf(struct drbd_connection *connection)
783{
784 bool has_net_conf;
785
786 rcu_read_lock();
787 has_net_conf = rcu_dereference(connection->net_conf);
788 rcu_read_unlock();
789
790 return has_net_conf;
791}
792
793void __update_timing_details(
794 struct drbd_thread_timing_details *tdp,
795 unsigned int *cb_nr,
796 void *cb,
797 const char *fn, const unsigned int line);
798
799#define update_worker_timing_details(c, cb) \
800 __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
801#define update_receiver_timing_details(c, cb) \
802 __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
803
804struct submit_worker {
805 struct workqueue_struct *wq;
806 struct work_struct worker;
807
808
809 struct list_head writes;
810};
811
812struct drbd_peer_device {
813 struct list_head peer_devices;
814 struct drbd_device *device;
815 struct drbd_connection *connection;
816 struct work_struct send_acks_work;
817#ifdef CONFIG_DEBUG_FS
818 struct dentry *debugfs_peer_dev;
819#endif
820};
821
822struct drbd_device {
823 struct drbd_resource *resource;
824 struct list_head peer_devices;
825 struct list_head pending_bitmap_io;
826
827 unsigned long flush_jif;
828#ifdef CONFIG_DEBUG_FS
829 struct dentry *debugfs_minor;
830 struct dentry *debugfs_vol;
831 struct dentry *debugfs_vol_oldest_requests;
832 struct dentry *debugfs_vol_act_log_extents;
833 struct dentry *debugfs_vol_resync_extents;
834 struct dentry *debugfs_vol_data_gen_id;
835 struct dentry *debugfs_vol_ed_gen_id;
836#endif
837
838 unsigned int vnr;
839 unsigned int minor;
840
841 struct kref kref;
842
843
844 unsigned long flags;
845
846
847 struct drbd_backing_dev *ldev __protected_by(local);
848
849 sector_t p_size;
850 struct request_queue *rq_queue;
851 struct block_device *this_bdev;
852 struct gendisk *vdisk;
853
854 unsigned long last_reattach_jif;
855 struct drbd_work resync_work;
856 struct drbd_work unplug_work;
857 struct timer_list resync_timer;
858 struct timer_list md_sync_timer;
859 struct timer_list start_resync_timer;
860 struct timer_list request_timer;
861
862
863 union drbd_state new_state_tmp;
864
865 union drbd_dev_state state;
866 wait_queue_head_t misc_wait;
867 wait_queue_head_t state_wait;
868 unsigned int send_cnt;
869 unsigned int recv_cnt;
870 unsigned int read_cnt;
871 unsigned int writ_cnt;
872 unsigned int al_writ_cnt;
873 unsigned int bm_writ_cnt;
874 atomic_t ap_bio_cnt;
875 atomic_t ap_actlog_cnt;
876 atomic_t ap_pending_cnt;
877 atomic_t rs_pending_cnt;
878 atomic_t unacked_cnt;
879 atomic_t local_cnt;
880 atomic_t suspend_cnt;
881
882
883 struct rb_root read_requests;
884 struct rb_root write_requests;
885
886
887
888 struct list_head pending_master_completion[2];
889 struct list_head pending_completion[2];
890
891
892 bool use_csums;
893
894 unsigned long rs_total;
895
896 unsigned long rs_failed;
897
898 unsigned long rs_start;
899
900 unsigned long rs_paused;
901
902 unsigned long rs_same_csum;
903#define DRBD_SYNC_MARKS 8
904#define DRBD_SYNC_MARK_STEP (3*HZ)
905
906 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
907
908 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
909
910 int rs_last_mark;
911 unsigned long rs_last_bcast;
912
913
914 sector_t ov_start_sector;
915 sector_t ov_stop_sector;
916
917 sector_t ov_position;
918
919 sector_t ov_last_oos_start;
920
921 sector_t ov_last_oos_size;
922 unsigned long ov_left;
923
924 struct drbd_bitmap *bitmap;
925 unsigned long bm_resync_fo;
926
927
928 struct lru_cache *resync;
929
930 unsigned int resync_locked;
931
932 unsigned int resync_wenr;
933
934 int open_cnt;
935 u64 *p_uuid;
936
937 struct list_head active_ee;
938 struct list_head sync_ee;
939 struct list_head done_ee;
940 struct list_head read_ee;
941 struct list_head net_ee;
942
943 int next_barrier_nr;
944 struct list_head resync_reads;
945 atomic_t pp_in_use;
946 atomic_t pp_in_use_by_net;
947 wait_queue_head_t ee_wait;
948 struct drbd_md_io md_io;
949 spinlock_t al_lock;
950 wait_queue_head_t al_wait;
951 struct lru_cache *act_log;
952 unsigned int al_tr_number;
953 int al_tr_cycle;
954 wait_queue_head_t seq_wait;
955 atomic_t packet_seq;
956 unsigned int peer_seq;
957 spinlock_t peer_seq_lock;
958 unsigned long comm_bm_set;
959 struct bm_io_work bm_io_work;
960 u64 ed_uuid;
961 struct mutex own_state_mutex;
962 struct mutex *state_mutex;
963 char congestion_reason;
964 atomic_t rs_sect_in;
965 atomic_t rs_sect_ev;
966 int rs_last_sect_ev;
967 int rs_last_events;
968
969 int c_sync_rate;
970 struct fifo_buffer *rs_plan_s;
971 int rs_in_flight;
972 atomic_t ap_in_flight;
973 unsigned int peer_max_bio_size;
974 unsigned int local_max_bio_size;
975
976
977
978 struct submit_worker submit;
979};
980
981struct drbd_bm_aio_ctx {
982 struct drbd_device *device;
	struct list_head list;
984 unsigned long start_jif;
985 atomic_t in_flight;
986 unsigned int done;
987 unsigned flags;
988#define BM_AIO_COPY_PAGES 1
989#define BM_AIO_WRITE_HINTED 2
990#define BM_AIO_WRITE_ALL_PAGES 4
991#define BM_AIO_READ 8
992 int error;
993 struct kref kref;
994};
995
996struct drbd_config_context {
997
998 unsigned int minor;
999
1000 unsigned int volume;
1001#define VOLUME_UNSPECIFIED (-1U)
1002
1003
1004 char *resource_name;
1005 struct nlattr *my_addr;
1006 struct nlattr *peer_addr;
1007
1008
1009 struct sk_buff *reply_skb;
1010
1011 struct drbd_genlmsghdr *reply_dh;
1012
1013 struct drbd_device *device;
1014 struct drbd_resource *resource;
1015 struct drbd_connection *connection;
1016};
1017
1018static inline struct drbd_device *minor_to_device(unsigned int minor)
1019{
1020 return (struct drbd_device *)idr_find(&drbd_devices, minor);
1021}
1022
1023static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
1024{
1025 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
1026}
1027
1028static inline struct drbd_peer_device *
1029conn_peer_device(struct drbd_connection *connection, int volume_number)
1030{
1031 return idr_find(&connection->peer_devices, volume_number);
1032}
1033
1034#define for_each_resource(resource, _resources) \
1035 list_for_each_entry(resource, _resources, resources)
1036
1037#define for_each_resource_rcu(resource, _resources) \
1038 list_for_each_entry_rcu(resource, _resources, resources)
1039
1040#define for_each_resource_safe(resource, tmp, _resources) \
1041 list_for_each_entry_safe(resource, tmp, _resources, resources)
1042
1043#define for_each_connection(connection, resource) \
1044 list_for_each_entry(connection, &resource->connections, connections)
1045
1046#define for_each_connection_rcu(connection, resource) \
1047 list_for_each_entry_rcu(connection, &resource->connections, connections)
1048
1049#define for_each_connection_safe(connection, tmp, resource) \
1050 list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
1051
1052#define for_each_peer_device(peer_device, device) \
1053 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
1054
1055#define for_each_peer_device_rcu(peer_device, device) \
1056 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
1057
1058#define for_each_peer_device_safe(peer_device, tmp, device) \
1059 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
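
/*
 * Iteration sketch: the _rcu variants require rcu_read_lock(); the plain
 * ones rely on the caller holding the appropriate locks.
 *
 *	struct drbd_peer_device *peer_device;
 *
 *	rcu_read_lock();
 *	for_each_peer_device_rcu(peer_device, device)
 *		drbd_info(peer_device, "connected via resource %s\n",
 *			  peer_device->connection->resource->name);
 *	rcu_read_unlock();
 */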
1060
1061static inline unsigned int device_to_minor(struct drbd_device *device)
1062{
1063 return device->minor;
1064}
1065
1066
1067
1068
1069
1070
1071
1072enum dds_flags {
1073 DDSF_FORCED = 1,
1074 DDSF_NO_RESYNC = 2,
1075};
1076
1077extern void drbd_init_set_defaults(struct drbd_device *device);
1078extern int drbd_thread_start(struct drbd_thread *thi);
1079extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1080#ifdef CONFIG_SMP
1081extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1082#else
1083#define drbd_thread_current_set_cpu(A) ({})
1084#endif
1085extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1086 unsigned int set_size);
1087extern void tl_clear(struct drbd_connection *);
1088extern void drbd_free_sock(struct drbd_connection *connection);
1089extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1090 void *buf, size_t size, unsigned msg_flags);
1091extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1092 unsigned);
1093
1094extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1095extern int drbd_send_protocol(struct drbd_connection *connection);
1096extern int drbd_send_uuids(struct drbd_peer_device *);
1097extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
1098extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
1099extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
1100extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
1101extern int drbd_send_current_state(struct drbd_peer_device *);
1102extern int drbd_send_sync_param(struct drbd_peer_device *);
1103extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1104 u32 set_size);
1105extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1106 struct drbd_peer_request *);
1107extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1108 struct p_block_req *rp);
1109extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
1110 struct p_data *dp, int data_size);
1111extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1112 sector_t sector, int blksize, u64 block_id);
1113extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
1114extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1115 struct drbd_peer_request *);
1116extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
1117extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1118 sector_t sector, int size, u64 block_id);
1119extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1120 int size, void *digest, int digest_size,
1121 enum drbd_packet cmd);
1122extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1123
1124extern int drbd_send_bitmap(struct drbd_device *device);
1125extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1126extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1127extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
1128extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1129extern void drbd_device_cleanup(struct drbd_device *device);
1130extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1131extern void drbd_queue_unplug(struct drbd_device *device);
1132
1133extern void conn_md_sync(struct drbd_connection *connection);
1134extern void drbd_md_write(struct drbd_device *device, void *buffer);
1135extern void drbd_md_sync(struct drbd_device *device);
1136extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1137extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1138extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1139extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1140extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1141extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1142extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1143extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1144extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
1145extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1146extern void drbd_md_mark_dirty(struct drbd_device *device);
1147extern void drbd_queue_bitmap_io(struct drbd_device *device,
1148 int (*io_fn)(struct drbd_device *),
1149 void (*done)(struct drbd_device *, int),
1150 char *why, enum bm_flag flags);
1151extern int drbd_bitmap_io(struct drbd_device *device,
1152 int (*io_fn)(struct drbd_device *),
1153 char *why, enum bm_flag flags);
1154extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1155 int (*io_fn)(struct drbd_device *),
1156 char *why, enum bm_flag flags);
1157extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1158extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* sizes and offsets of our on-disk meta data */

1193#define MD_128MB_SECT (128LLU << 11)
1194#define MD_4kB_SECT 8
1195#define MD_32kB_SECT 64
1196
1197
1198#define AL_EXTENT_SHIFT 22
1199#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1215#define AL_UPDATES_PER_TRANSACTION 64
1216#define AL_CONTEXT_PER_TRANSACTION 919
1217
1218#if BITS_PER_LONG == 32
1219#define LN2_BPL 5
1220#define cpu_to_lel(A) cpu_to_le32(A)
1221#define lel_to_cpu(A) le32_to_cpu(A)
1222#elif BITS_PER_LONG == 64
1223#define LN2_BPL 6
1224#define cpu_to_lel(A) cpu_to_le64(A)
1225#define lel_to_cpu(A) le64_to_cpu(A)
1226#else
1227#error "LN2 of BITS_PER_LONG unknown!"
1228#endif
1229
1230
1231
1232struct bm_extent {
1233 int rs_left;
1234 int rs_failed;
1235 unsigned long flags;
1236 struct lc_element lce;
1237};
1238
1239#define BME_NO_WRITES 0
1240#define BME_LOCKED 1
1241#define BME_PRIORITY 2
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251#define SLEEP_TIME (HZ/10)
1252
1253
1254
1255#define BM_BLOCK_SHIFT 12
1256#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1257
1258
1259
1260#define BM_EXT_SHIFT 24
1261#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1262
1263#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1264#error "HAVE YOU FIXED drbdmeta AS WELL??"
1265#endif
1266
1267
1268#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1269#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1270#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1271
1272
1273#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1274
1275
1276
1277#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1278#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1279
1280
1281#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1282
1283#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1284
1285#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1286
1287#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1288
1289
1290
1291#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
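
/*
 * Worked example with the defaults above: one bitmap bit covers
 * BM_BLOCK_SIZE = 4 KiB = 8 sectors (BM_SECT_PER_BIT), one bitmap extent
 * covers BM_EXT_SIZE = 16 MiB, and one activity log extent covers
 * AL_EXTENT_SIZE = 4 MiB, so AL_EXT_PER_BM_SECT evaluates to
 * 1 << (24 - 22) = 4: four activity log extents fit into one bitmap extent.
 */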
1292
1307#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1308
1309
1310
1311
1312
1313#define DRBD_MAX_SECTORS_FIXED_BM \
1314 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1315#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1316#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1317#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1318#else
1319#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
1320
1321#if BITS_PER_LONG == 32
1322
1323
1324
1325#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1326#else
1327
1328#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1329
1330#endif
1331#endif
1332
1333
1334
1335
1336
1337
1338
1339#define DRBD_MAX_BIO_SIZE (1U << 20)
1340#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
1341#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1342#endif
1343#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)
1344
1345#define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
1346#define DRBD_MAX_BIO_SIZE_P95 (1U << 17)
1347
1348
1349
1350
1351#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1352#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
1353
1354extern int drbd_bm_init(struct drbd_device *device);
1355extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1356extern void drbd_bm_cleanup(struct drbd_device *device);
1357extern void drbd_bm_set_all(struct drbd_device *device);
1358extern void drbd_bm_clear_all(struct drbd_device *device);
1359
1360extern int drbd_bm_set_bits(
1361 struct drbd_device *device, unsigned long s, unsigned long e);
1362extern int drbd_bm_clear_bits(
1363 struct drbd_device *device, unsigned long s, unsigned long e);
1364extern int drbd_bm_count_bits(
1365 struct drbd_device *device, const unsigned long s, const unsigned long e);
1366
1367
1368extern void _drbd_bm_set_bits(struct drbd_device *device,
1369 const unsigned long s, const unsigned long e);
1370extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1371extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1372extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1373extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1374extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1375extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1376extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1377extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1378extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1379extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1380extern size_t drbd_bm_words(struct drbd_device *device);
1381extern unsigned long drbd_bm_bits(struct drbd_device *device);
1382extern sector_t drbd_bm_capacity(struct drbd_device *device);
1383
1384#define DRBD_END_OF_BITMAP (~(unsigned long)0)
1385extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1386
1387extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1388extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1389extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1390extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1391
1392extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1393 size_t number, unsigned long *buffer);
1394
1395extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1396 size_t number, unsigned long *buffer);
1397
1398extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1399extern void drbd_bm_unlock(struct drbd_device *device);
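
/*
 * Typical bitmap access pattern (sketch): lock the bitmap stating what may
 * still change, modify it, write it out, unlock. The caller also needs a
 * get_ldev() reference for the write-out. "start_bit"/"end_bit" are
 * illustrative.
 *
 *	drbd_bm_lock(device, "resync finished", BM_LOCKED_SET_ALLOWED);
 *	drbd_bm_set_bits(device, start_bit, end_bit);
 *	drbd_bm_write(device);
 *	drbd_bm_unlock(device);
 */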
1400
1401
1402extern struct kmem_cache *drbd_request_cache;
1403extern struct kmem_cache *drbd_ee_cache;
1404extern struct kmem_cache *drbd_bm_ext_cache;
1405extern struct kmem_cache *drbd_al_ext_cache;
1406extern mempool_t drbd_request_mempool;
1407extern mempool_t drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from or sent to the peer */
1422extern struct page *drbd_pp_pool;
1423extern spinlock_t drbd_pp_lock;
1424extern int drbd_pp_vacant;
1425extern wait_queue_head_t drbd_pp_wait;
1426
1427
1428
1429
1430
1431
1432#define DRBD_MIN_POOL_PAGES 128
1433extern mempool_t drbd_md_io_page_pool;
1434
1435
1436
1437extern struct bio_set drbd_md_io_bio_set;
1438
1439extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1440
1441
1442extern struct bio_set drbd_io_bio_set;
1443
1444extern struct mutex resources_mutex;
1445
1446extern int conn_lowest_minor(struct drbd_connection *connection);
1447extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1448extern void drbd_destroy_device(struct kref *kref);
1449extern void drbd_delete_device(struct drbd_device *device);
1450
1451extern struct drbd_resource *drbd_create_resource(const char *name);
1452extern void drbd_free_resource(struct drbd_resource *resource);
1453
1454extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1455extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1456extern void drbd_destroy_connection(struct kref *kref);
1457extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1458 void *peer_addr, int peer_addr_len);
1459extern struct drbd_resource *drbd_find_resource(const char *name);
1460extern void drbd_destroy_resource(struct kref *kref);
1461extern void conn_free_crypto(struct drbd_connection *connection);
1462
1463
1464extern void do_submit(struct work_struct *ws);
1465extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1466extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1467extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1468extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1469
1470
1471
1472
1473extern struct mutex notification_mutex;
1474
1475extern void drbd_suspend_io(struct drbd_device *device);
1476extern void drbd_resume_io(struct drbd_device *device);
1477extern char *ppsize(char *buf, unsigned long long size);
1478extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1479enum determine_dev_size {
1480 DS_ERROR_SHRINK = -3,
1481 DS_ERROR_SPACE_MD = -2,
1482 DS_ERROR = -1,
1483 DS_UNCHANGED = 0,
1484 DS_SHRUNK = 1,
1485 DS_GREW = 2,
1486 DS_GREW_FROM_ZERO = 3,
1487};
1488extern enum determine_dev_size
1489drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1490extern void resync_after_online_grow(struct drbd_device *);
1491extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1492 struct drbd_backing_dev *bdev, struct o_qlim *o);
1493extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1494 enum drbd_role new_role,
1495 int force);
1496extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1497extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1498extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1499extern int drbd_khelper(struct drbd_device *device, char *cmd);
1500
1501
1502
1503extern void drbd_md_endio(struct bio *bio);
1504extern void drbd_peer_request_endio(struct bio *bio);
1505extern void drbd_request_endio(struct bio *bio);
1506extern int drbd_worker(struct drbd_thread *thi);
1507enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1508void drbd_resync_after_changed(struct drbd_device *device);
1509extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1510extern void resume_next_sg(struct drbd_device *device);
1511extern void suspend_other_sg(struct drbd_device *device);
1512extern int drbd_resync_finished(struct drbd_device *device);
1513
1514extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1515extern void drbd_md_put_buffer(struct drbd_device *device);
1516extern int drbd_md_sync_page_io(struct drbd_device *device,
1517 struct drbd_backing_dev *bdev, sector_t sector, int op);
1518extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1519extern void wait_until_done_or_force_detached(struct drbd_device *device,
1520 struct drbd_backing_dev *bdev, unsigned int *done);
1521extern void drbd_rs_controller_reset(struct drbd_device *device);
1522
1523static inline void ov_out_of_sync_print(struct drbd_device *device)
1524{
1525 if (device->ov_last_oos_size) {
1526 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1527 (unsigned long long)device->ov_last_oos_start,
1528 (unsigned long)device->ov_last_oos_size);
1529 }
1530 device->ov_last_oos_size = 0;
1531}
1532
1533
1534extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
1535extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
1536
1537extern int w_e_end_data_req(struct drbd_work *, int);
1538extern int w_e_end_rsdata_req(struct drbd_work *, int);
1539extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1540extern int w_e_end_ov_reply(struct drbd_work *, int);
1541extern int w_e_end_ov_req(struct drbd_work *, int);
1542extern int w_ov_finished(struct drbd_work *, int);
1543extern int w_resync_timer(struct drbd_work *, int);
1544extern int w_send_write_hint(struct drbd_work *, int);
1545extern int w_send_dblock(struct drbd_work *, int);
1546extern int w_send_read_req(struct drbd_work *, int);
1547extern int w_e_reissue(struct drbd_work *, int);
1548extern int w_restart_disk_io(struct drbd_work *, int);
1549extern int w_send_out_of_sync(struct drbd_work *, int);
1550extern int w_start_resync(struct drbd_work *, int);
1551
1552extern void resync_timer_fn(struct timer_list *t);
1553extern void start_resync_timer_fn(struct timer_list *t);
1554
1555extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1556
1557
1558extern int drbd_receiver(struct drbd_thread *thi);
1559extern int drbd_ack_receiver(struct drbd_thread *thi);
1560extern void drbd_send_ping_wf(struct work_struct *ws);
1561extern void drbd_send_acks_wf(struct work_struct *ws);
1562extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1563extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1564 bool throttle_if_app_is_waiting);
1565extern int drbd_submit_peer_request(struct drbd_device *,
1566 struct drbd_peer_request *, const unsigned,
1567 const unsigned, const int);
1568extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1569extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1570 sector_t, unsigned int,
1571 unsigned int,
1572 gfp_t) __must_hold(local);
1573extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1574 int);
1575#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1576#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1577extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1578extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1579extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1580extern int drbd_connected(struct drbd_peer_device *);
1581
1582static inline void drbd_tcp_cork(struct socket *sock)
1583{
1584 int val = 1;
1585 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1586 (char*)&val, sizeof(val));
1587}
1588
1589static inline void drbd_tcp_uncork(struct socket *sock)
1590{
1591 int val = 0;
1592 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1593 (char*)&val, sizeof(val));
1594}
1595
1596static inline void drbd_tcp_nodelay(struct socket *sock)
1597{
1598 int val = 1;
1599 (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1600 (char*)&val, sizeof(val));
1601}
1602
1603static inline void drbd_tcp_quickack(struct socket *sock)
1604{
1605 int val = 2;
1606 (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1607 (char*)&val, sizeof(val));
1608}
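
/*
 * Corking sketch: batch several small packets into fewer TCP segments by
 * corking around the sends and uncorking afterwards, as the sender does for
 * the data socket:
 *
 *	drbd_tcp_cork(connection->data.socket);
 *	... send header and payload ...
 *	drbd_tcp_uncork(connection->data.socket);
 */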
1609
1610
1611static inline void drbd_set_my_capacity(struct drbd_device *device,
1612 sector_t size)
1613{
1614
1615 set_capacity(device->vdisk, size);
1616 device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1617}
1618
1619
1620
1621
1622static inline void drbd_generic_make_request(struct drbd_device *device,
1623 int fault_type, struct bio *bio)
1624{
1625 __release(local);
1626 if (!bio->bi_disk) {
1627 drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
1628 bio->bi_status = BLK_STS_IOERR;
1629 bio_endio(bio);
1630 return;
1631 }
1632
1633 if (drbd_insert_fault(device, fault_type))
1634 bio_io_error(bio);
1635 else
1636 generic_make_request(bio);
1637}
1638
1639void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1640 enum write_ordering_e wo);
1641
1642
1643extern struct proc_dir_entry *drbd_proc;
1644int drbd_seq_show(struct seq_file *seq, void *v);
1645
1646
1647extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1648extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1649extern void drbd_al_begin_io_commit(struct drbd_device *device);
1650extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1651extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1652extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1653extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1654extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1655extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1656extern void drbd_rs_cancel_all(struct drbd_device *device);
1657extern int drbd_rs_del_all(struct drbd_device *device);
1658extern void drbd_rs_failed_io(struct drbd_device *device,
1659 sector_t sector, int size);
1660extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1661
1662enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1663extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1664 enum update_sync_bits_mode mode);
1665#define drbd_set_in_sync(device, sector, size) \
1666 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1667#define drbd_set_out_of_sync(device, sector, size) \
1668 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1669#define drbd_rs_failed_io(device, sector, size) \
1670 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
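
/*
 * Illustrative use of the sync-state helpers (sector in 512 byte units,
 * size in bytes, as elsewhere in this file):
 *
 *	drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
 *	drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
 */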
1671extern void drbd_al_shrink(struct drbd_device *device);
1672extern int drbd_al_initialize(struct drbd_device *, void *);
1673
1674
1675
1676struct sib_info {
1677 enum drbd_state_info_bcast_reason sib_reason;
1678 union {
1679 struct {
1680 char *helper_name;
1681 unsigned helper_exit_code;
1682 };
1683 struct {
1684 union drbd_state os;
1685 union drbd_state ns;
1686 };
1687 };
1688};
1689void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1690
1691extern void notify_resource_state(struct sk_buff *,
1692 unsigned int,
1693 struct drbd_resource *,
1694 struct resource_info *,
1695 enum drbd_notification_type);
1696extern void notify_device_state(struct sk_buff *,
1697 unsigned int,
1698 struct drbd_device *,
1699 struct device_info *,
1700 enum drbd_notification_type);
1701extern void notify_connection_state(struct sk_buff *,
1702 unsigned int,
1703 struct drbd_connection *,
1704 struct connection_info *,
1705 enum drbd_notification_type);
1706extern void notify_peer_device_state(struct sk_buff *,
1707 unsigned int,
1708 struct drbd_peer_device *,
1709 struct peer_device_info *,
1710 enum drbd_notification_type);
1711extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1712 struct drbd_connection *, const char *, int);
1713
1714
1715
1716
1717
1718
1719static inline struct page *page_chain_next(struct page *page)
1720{
1721 return (struct page *)page_private(page);
1722}
1723#define page_chain_for_each(page) \
1724 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1725 page = page_chain_next(page))
1726#define page_chain_for_each_safe(page, n) \
1727 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1728
1729
1730static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1731{
1732 struct page *page = peer_req->pages;
1733 page_chain_for_each(page) {
1734 if (page_count(page) > 1)
1735 return 1;
1736 }
1737 return 0;
1738}
1739
1740static inline union drbd_state drbd_read_state(struct drbd_device *device)
1741{
1742 struct drbd_resource *resource = device->resource;
1743 union drbd_state rv;
1744
1745 rv.i = device->state.i;
1746 rv.susp = resource->susp;
1747 rv.susp_nod = resource->susp_nod;
1748 rv.susp_fen = resource->susp_fen;
1749
1750 return rv;
1751}
1752
1753enum drbd_force_detach_flags {
1754 DRBD_READ_ERROR,
1755 DRBD_WRITE_ERROR,
1756 DRBD_META_IO_ERROR,
1757 DRBD_FORCE_DETACH,
1758};
1759
1760#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1761static inline void __drbd_chk_io_error_(struct drbd_device *device,
1762 enum drbd_force_detach_flags df,
1763 const char *where)
1764{
1765 enum drbd_io_error_p ep;
1766
1767 rcu_read_lock();
1768 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1769 rcu_read_unlock();
1770 switch (ep) {
1771 case EP_PASS_ON:
1772 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1773 if (__ratelimit(&drbd_ratelimit_state))
1774 drbd_err(device, "Local IO failed in %s.\n", where);
1775 if (device->state.disk > D_INCONSISTENT)
1776 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1777 break;
1778 }
		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1780 case EP_DETACH:
1781 case EP_CALL_HELPER:
1802 set_bit(WAS_IO_ERROR, &device->flags);
1803 if (df == DRBD_READ_ERROR)
1804 set_bit(WAS_READ_ERROR, &device->flags);
1805 if (df == DRBD_FORCE_DETACH)
1806 set_bit(FORCE_DETACH, &device->flags);
1807 if (device->state.disk > D_FAILED) {
1808 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1809 drbd_err(device,
1810 "Local IO failed in %s. Detaching...\n", where);
1811 }
1812 break;
1813 }
1814}
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1825static inline void drbd_chk_io_error_(struct drbd_device *device,
1826 int error, enum drbd_force_detach_flags forcedetach, const char *where)
1827{
1828 if (error) {
1829 unsigned long flags;
1830 spin_lock_irqsave(&device->resource->req_lock, flags);
1831 __drbd_chk_io_error_(device, forcedetach, where);
1832 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1833 }
1834}
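
/*
 * Sketch of the intended call site: completion handlers map a bio error to
 * the configured on-io-error policy. The second argument is the error, the
 * third says what kind of failure it was:
 *
 *	if (bio->bi_status)
 *		drbd_chk_io_error(device, 1, DRBD_WRITE_ERROR);
 */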
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1845{
1846 switch (bdev->md.meta_dev_idx) {
1847 case DRBD_MD_INDEX_INTERNAL:
1848 case DRBD_MD_INDEX_FLEX_INT:
1849 return bdev->md.md_offset + bdev->md.bm_offset;
1850 case DRBD_MD_INDEX_FLEX_EXT:
1851 default:
1852 return bdev->md.md_offset;
1853 }
1854}
1855
1856
1857
1858
1859
1860static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1861{
1862 switch (bdev->md.meta_dev_idx) {
1863 case DRBD_MD_INDEX_INTERNAL:
1864 case DRBD_MD_INDEX_FLEX_INT:
1865 return bdev->md.md_offset + MD_4kB_SECT -1;
1866 case DRBD_MD_INDEX_FLEX_EXT:
1867 default:
1868 return bdev->md.md_offset + bdev->md.md_size_sect -1;
1869 }
1870}
1871
1872
1873static inline sector_t drbd_get_capacity(struct block_device *bdev)
1874{
1875
1876 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1877}
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1888{
1889 sector_t s;
1890
1891 switch (bdev->md.meta_dev_idx) {
1892 case DRBD_MD_INDEX_INTERNAL:
1893 case DRBD_MD_INDEX_FLEX_INT:
1894 s = drbd_get_capacity(bdev->backing_bdev)
1895 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1896 drbd_md_first_sector(bdev))
1897 : 0;
1898 break;
1899 case DRBD_MD_INDEX_FLEX_EXT:
1900 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1901 drbd_get_capacity(bdev->backing_bdev));
1902
1903 s = min_t(sector_t, s,
1904 BM_EXT_TO_SECT(bdev->md.md_size_sect
1905 - bdev->md.bm_offset));
1906 break;
1907 default:
1908 s = min_t(sector_t, DRBD_MAX_SECTORS,
1909 drbd_get_capacity(bdev->backing_bdev));
1910 }
1911 return s;
1912}
1913
1914
1915
1916
1917
1918static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1919{
1920 const int meta_dev_idx = bdev->md.meta_dev_idx;
1921
1922 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1923 return 0;
1924
1925
1926
1927 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1928 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1929 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1930
1931
1932 return MD_128MB_SECT * bdev->md.meta_dev_idx;
1933}
1934
1935static inline void
1936drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1937{
1938 unsigned long flags;
1939 spin_lock_irqsave(&q->q_lock, flags);
1940 list_add_tail(&w->list, &q->q);
1941 spin_unlock_irqrestore(&q->q_lock, flags);
1942 wake_up(&q->q_wait);
1943}
1944
1945static inline void
1946drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1947{
1948 unsigned long flags;
1949 spin_lock_irqsave(&q->q_lock, flags);
1950 if (list_empty_careful(&w->list))
1951 list_add_tail(&w->list, &q->q);
1952 spin_unlock_irqrestore(&q->q_lock, flags);
1953 wake_up(&q->q_wait);
1954}
1955
1956static inline void
1957drbd_device_post_work(struct drbd_device *device, int work_bit)
1958{
1959 if (!test_and_set_bit(work_bit, &device->flags)) {
1960 struct drbd_connection *connection =
1961 first_peer_device(device)->connection;
1962 struct drbd_work_queue *q = &connection->sender_work;
1963 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1964 wake_up(&q->q_wait);
1965 }
1966}
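
/*
 * Work item sketch: a drbd_work couples a list node with a callback that the
 * worker thread invokes (cancel != 0 when the work is canceled instead of
 * executed normally). "example_cb" is purely illustrative.
 *
 *	static int example_cb(struct drbd_work *w, int cancel)
 *	{
 *		return 0;
 *	}
 *
 *	device->unplug_work.cb = example_cb;
 *	drbd_queue_work(&first_peer_device(device)->connection->sender_work,
 *			&device->unplug_work);
 */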
1967
1968extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1969
1970
1971
1972
1973
1974static inline void wake_ack_receiver(struct drbd_connection *connection)
1975{
1976 struct task_struct *task = connection->ack_receiver.task;
1977 if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1978 force_sig(SIGXCPU, task);
1979}
1980
1981static inline void request_ping(struct drbd_connection *connection)
1982{
1983 set_bit(SEND_PING, &connection->flags);
1984 wake_ack_receiver(connection);
1985}
1986
1987extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1988extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1989extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1990 enum drbd_packet, unsigned int, void *,
1991 unsigned int);
1992extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1993 enum drbd_packet, unsigned int, void *,
1994 unsigned int);
1995
1996extern int drbd_send_ping(struct drbd_connection *connection);
1997extern int drbd_send_ping_ack(struct drbd_connection *connection);
1998extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1999extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
2000
2001static inline void drbd_thread_stop(struct drbd_thread *thi)
2002{
2003 _drbd_thread_stop(thi, false, true);
2004}
2005
2006static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
2007{
2008 _drbd_thread_stop(thi, false, false);
2009}
2010
2011static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
2012{
2013 _drbd_thread_stop(thi, true, false);
2014}

/* ap_pending_cnt: how many answer packets (data and barrier acks) we still
 * expect from our peer for application requests. Increased when a request
 * is queued for the network, decreased when the corresponding ack arrives
 * or the transfer log is cleared. */
2038static inline void inc_ap_pending(struct drbd_device *device)
2039{
2040 atomic_inc(&device->ap_pending_cnt);
2041}
2042
2043#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
2044 if (atomic_read(&device->which) < 0) \
2045 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
2046 func, line, \
2047 atomic_read(&device->which))
2048
2049#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2050static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2051{
2052 if (atomic_dec_and_test(&device->ap_pending_cnt))
2053 wake_up(&device->misc_wait);
2054 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2055}

/* rs_pending_cnt: how many resync-related answers we still expect from the
 * peer (resync data requests, online-verify requests, ...). */
2063static inline void inc_rs_pending(struct drbd_device *device)
2064{
2065 atomic_inc(&device->rs_pending_cnt);
2066}
2067
2068#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2069static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2070{
2071 atomic_dec(&device->rs_pending_cnt);
2072 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2073}

/* unacked_cnt: how many packets we received from the peer but have not yet
 * acknowledged. */
2084static inline void inc_unacked(struct drbd_device *device)
2085{
2086 atomic_inc(&device->unacked_cnt);
2087}
2088
2089#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2090static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2091{
2092 atomic_dec(&device->unacked_cnt);
2093 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2094}
2095
2096#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2097static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2098{
2099 atomic_sub(n, &device->unacked_cnt);
2100 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2101}
2102
2103static inline bool is_sync_target_state(enum drbd_conns connection_state)
2104{
2105 return connection_state == C_SYNC_TARGET ||
2106 connection_state == C_PAUSED_SYNC_T;
2107}
2108
2109static inline bool is_sync_source_state(enum drbd_conns connection_state)
2110{
2111 return connection_state == C_SYNC_SOURCE ||
2112 connection_state == C_PAUSED_SYNC_S;
2113}
2114
2115static inline bool is_sync_state(enum drbd_conns connection_state)
2116{
2117 return is_sync_source_state(connection_state) ||
2118 is_sync_target_state(connection_state);
2119}
2120
2121
2122
2123
2124
2125
2126
2127
2128#define get_ldev_if_state(_device, _min_state) \
2129 (_get_ldev_if_state((_device), (_min_state)) ? \
2130 ({ __acquire(x); true; }) : false)
2131#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2132
2133static inline void put_ldev(struct drbd_device *device)
2134{
2135 enum drbd_disk_state disk_state = device->state.disk;
2136
2137
2138
2139
2140 int i = atomic_dec_return(&device->local_cnt);
2141
2142
2143
2144
2145 __release(local);
2146 D_ASSERT(device, i >= 0);
2147 if (i == 0) {
2148 if (disk_state == D_DISKLESS)
2149
2150 drbd_device_post_work(device, DESTROY_DISK);
2151 if (disk_state == D_FAILED)
2152
2153 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2154 drbd_device_post_work(device, GO_DISKLESS);
2155 wake_up(&device->misc_wait);
2156 }
2157}
2158
2159#ifndef __CHECKER__
2160static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2161{
2162 int io_allowed;
2163
2164
2165 if (device->state.disk == D_DISKLESS)
2166 return 0;
2167
2168 atomic_inc(&device->local_cnt);
2169 io_allowed = (device->state.disk >= mins);
2170 if (!io_allowed)
2171 put_ldev(device);
2172 return io_allowed;
2173}
2174#else
2175extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2176#endif
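
/*
 * Canonical local-disk reference pattern: get_ldev() (or get_ldev_if_state()
 * for a stricter minimum disk state) takes a reference on device->ldev;
 * every successful get must be paired with put_ldev().
 *
 *	if (get_ldev_if_state(device, D_UP_TO_DATE)) {
 *		sector_t cap = drbd_get_capacity(device->ldev->backing_bdev);
 *		...
 *		put_ldev(device);
 *	}
 */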
2177
2178
2179
2180
2181static inline int drbd_get_max_buffers(struct drbd_device *device)
2182{
2183 struct net_conf *nc;
2184 int mxb;
2185
2186 rcu_read_lock();
2187 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2188 mxb = nc ? nc->max_buffers : 1000000;
2189 rcu_read_unlock();
2190
2191 return mxb;
2192}
2193
2194static inline int drbd_state_is_stable(struct drbd_device *device)
2195{
2196 union drbd_dev_state s = device->state;
2197
2198
2199
2200
2201 switch ((enum drbd_conns)s.conn) {
2202
2203 case C_STANDALONE:
2204 case C_WF_CONNECTION:
2205
2206 case C_CONNECTED:
2207 case C_SYNC_SOURCE:
2208 case C_SYNC_TARGET:
2209 case C_VERIFY_S:
2210 case C_VERIFY_T:
2211 case C_PAUSED_SYNC_S:
2212 case C_PAUSED_SYNC_T:
2213 case C_AHEAD:
2214 case C_BEHIND:
2215
2216 case C_DISCONNECTING:
2217 case C_UNCONNECTED:
2218 case C_TIMEOUT:
2219 case C_BROKEN_PIPE:
2220 case C_NETWORK_FAILURE:
2221 case C_PROTOCOL_ERROR:
2222 case C_TEAR_DOWN:
2223 case C_WF_REPORT_PARAMS:
2224 case C_STARTING_SYNC_S:
2225 case C_STARTING_SYNC_T:
2226 break;
2227
2228
2229 case C_WF_BITMAP_S:
2230 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2231 return 0;
2232 break;
2233
2234
2235 case C_WF_BITMAP_T:
2236 case C_WF_SYNC_UUID:
2237 case C_MASK:
2238
2239 return 0;
2240 }
2241
2242 switch ((enum drbd_disk_state)s.disk) {
2243 case D_DISKLESS:
2244 case D_INCONSISTENT:
2245 case D_OUTDATED:
2246 case D_CONSISTENT:
2247 case D_UP_TO_DATE:
2248 case D_FAILED:
2249
2250 break;
2251
2252
2253 case D_ATTACHING:
2254 case D_NEGOTIATING:
2255 case D_UNKNOWN:
2256 case D_MASK:
2257
2258 return 0;
2259 }
2260
2261 return 1;
2262}
2263
2264static inline int drbd_suspended(struct drbd_device *device)
2265{
2266 struct drbd_resource *resource = device->resource;
2267
2268 return resource->susp || resource->susp_fen || resource->susp_nod;
2269}
2270
2271static inline bool may_inc_ap_bio(struct drbd_device *device)
2272{
2273 int mxb = drbd_get_max_buffers(device);
2274
2275 if (drbd_suspended(device))
2276 return false;
2277 if (atomic_read(&device->suspend_cnt))
2278 return false;
2279
2280
2281
2282
2283
2284
2285 if (!drbd_state_is_stable(device))
2286 return false;
2287
2288
2289
2290 if (atomic_read(&device->ap_bio_cnt) > mxb)
2291 return false;
2292 if (test_bit(BITMAP_IO, &device->flags))
2293 return false;
2294 return true;
2295}
2296
2297static inline bool inc_ap_bio_cond(struct drbd_device *device)
2298{
2299 bool rv = false;
2300
2301 spin_lock_irq(&device->resource->req_lock);
2302 rv = may_inc_ap_bio(device);
2303 if (rv)
2304 atomic_inc(&device->ap_bio_cnt);
2305 spin_unlock_irq(&device->resource->req_lock);
2306
2307 return rv;
2308}
2309
2310static inline void inc_ap_bio(struct drbd_device *device)
2311{
2312
2313
2314
2315
2316
2317
2318
2319
2320 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2321}
2322
2323static inline void dec_ap_bio(struct drbd_device *device)
2324{
2325 int mxb = drbd_get_max_buffers(device);
2326 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2327
2328 D_ASSERT(device, ap_bio >= 0);
2329
2330 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2331 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2332 drbd_queue_work(&first_peer_device(device)->
2333 connection->sender_work,
2334 &device->bm_io_work.w);
2335 }
2336
2337
2338
2339
2340 if (ap_bio < mxb)
2341 wake_up(&device->misc_wait);
2342}
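
/*
 * Application I/O accounting sketch: inc_ap_bio() blocks while the device is
 * suspended, in an unstable state, over the max-buffers limit, or while
 * bitmap I/O is pending; dec_ap_bio() wakes waiters and kicks queued bitmap
 * I/O once the count drains.
 *
 *	inc_ap_bio(device);
 *	... submit the request ...
 *	dec_ap_bio(device);   (in the completion path)
 */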
2343
2344static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2345{
2346 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2347 first_peer_device(device)->connection->agreed_pro_version != 100;
2348}
2349
2350static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2351{
2352 int changed = device->ed_uuid != val;
2353 device->ed_uuid = val;
2354 return changed;
2355}
2356
2357static inline int drbd_queue_order_type(struct drbd_device *device)
2358{
2359
2360
2361#ifndef QUEUE_ORDERED_NONE
2362#define QUEUE_ORDERED_NONE 0
2363#endif
2364 return QUEUE_ORDERED_NONE;
2365}
2366
2367static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2368{
2369 return list_first_entry_or_null(&resource->connections,
2370 struct drbd_connection, connections);
2371}
2372
2373#endif
2374