linux/drivers/block/drbd/drbd_int.h
/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes
 * I choose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
        dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
        dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
        printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
        printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
        (__builtin_types_compatible_p(typeof(obj), type) || \
         __builtin_types_compatible_p(typeof(obj), const type)), \
        func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
        __builtin_choose_expr( \
          __drbd_printk_if_same_type(obj, struct drbd_device *, \
                             __drbd_printk_device, level, fmt, ## args), \
          __builtin_choose_expr( \
            __drbd_printk_if_same_type(obj, struct drbd_resource *, \
                               __drbd_printk_resource, level, fmt, ## args), \
            __builtin_choose_expr( \
              __drbd_printk_if_same_type(obj, struct drbd_connection *, \
                                 __drbd_printk_connection, level, fmt, ## args), \
              __builtin_choose_expr( \
                __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
                                 __drbd_printk_peer_device, level, fmt, ## args), \
                drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
        drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
        drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
        drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
        drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
        drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
        drbd_printk(KERN_EMERG, obj, fmt, ## args)
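
/*
 * Usage sketch (illustrative, not from the original source): drbd_printk()
 * picks the right output helper from the static type of its first argument,
 * so one call syntax serves all four object types:
 *
 *        drbd_warn(device, "resync aborted\n");
 *        drbd_info(connection, "handshake complete\n");
 *
 * An unsupported object type makes __builtin_choose_expr() fall through to
 * drbd_printk_with_wrong_object_type(), which is declared but never defined,
 * so such a call fails at link time.
 */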

#define dynamic_drbd_dbg(device, fmt, args...) \
        dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)   do { \
        if (!(exp)) \
                drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
        } while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({                                                          \
                bool _bool = (exp);                                             \
                if (!_bool)                                                     \
                        drbd_err(device, "ASSERTION %s FAILED in %s\n",         \
                                #exp, __func__);                                \
                _bool;                                                          \
                })
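
/*
 * Illustrative use (not from the original source): because expect() yields
 * the tested condition, callers can log an assertion failure and still
 * recover.  Note it relies on a variable named "device" being in scope:
 *
 *        if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *                size = DRBD_MAX_BIO_SIZE;
 */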

/* Defines to control fault insertion */
enum {
        DRBD_FAULT_MD_WR = 0,   /* meta data write */
        DRBD_FAULT_MD_RD = 1,   /*           read  */
        DRBD_FAULT_RS_WR = 2,   /* resync          */
        DRBD_FAULT_RS_RD = 3,
        DRBD_FAULT_DT_WR = 4,   /* data            */
        DRBD_FAULT_DT_RD = 5,
        DRBD_FAULT_DT_RA = 6,   /* data read ahead */
        DRBD_FAULT_BM_ALLOC = 7,        /* bitmap allocation */
        DRBD_FAULT_AL_EE = 8,   /* alloc ee */
        DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

        DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
        return drbd_fault_rate &&
                (drbd_enable_faults & (1<<type)) &&
                _drbd_insert_fault(device, type);
#else
        return 0;
#endif
}
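
/*
 * Sketch of the intended call pattern (hypothetical call site): test the
 * fault hook just before submitting I/O and fake an error instead:
 *
 *        if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *                bio_io_error(bio);
 *        else
 *                submit_bio(bio);
 *
 * Without CONFIG_DRBD_FAULT_INJECTION this collapses to "return 0" and the
 * compiler drops the error branch entirely.
 */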

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
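/* e.g. div_ceil(7, 4) == 2, while div_floor(7, 4) == 1 */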

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
        /* "const"
         * stores total bits and long words
         * of the bitmap, so we don't need to
         * call the accessor functions over and again. */
        unsigned long bm_bits;
        unsigned long bm_words;
        /* during xfer, current position within the bitmap */
        unsigned long bit_offset;
        unsigned long word_offset;

        /* statistics; index: (h->command == P_BITMAP) */
        unsigned packets[2];
        unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
                const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
        /* word_offset counts "native long words" (32 or 64 bit),
         * aligned at 64 bit.
         * Encoded packet may end at an unaligned bit offset.
         * In case a fallback clear text packet is transmitted in
         * between, we adjust this offset back to the last 64bit
         * aligned "native long word", which makes coding and decoding
         * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
        c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
        c->word_offset = c->bit_offset >> 5;
        c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
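
/*
 * Worked example (illustrative): for bit_offset == 100, a 64 bit arch
 * computes word_offset = 100 >> 6 == 1; a 32 bit arch computes
 * 100 >> 5 == 3, then rounds down to the even word, word_offset == 2.
 * Both point at the same 64 bit boundary, bit 64.
 */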

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
        NONE,
        RUNNING,
        EXITING,
        RESTARTING
};

struct drbd_thread {
        spinlock_t t_lock;
        struct task_struct *task;
        struct completion stop;
        enum drbd_thread_state t_state;
        int (*function) (struct drbd_thread *);
        struct drbd_resource *resource;
        struct drbd_connection *connection;
        int reset_cpu_mask;
        const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
        /* THINK testing the t_state seems to be uncritical in all cases
         * (but thread_{start,stop}), so we can read it *without* the lock.
         *      --lge */

        smp_rmb();
        return thi->t_state;
}

struct drbd_work {
        struct list_head list;
        int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
        struct drbd_work w;
        struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
        struct drbd_work w;
        struct drbd_device *device;

        /* if local IO is not allowed, will be NULL.
         * if local IO _is_ allowed, holds the locally submitted bio clone,
         * or, after local IO completion, the ERR_PTR(error).
         * see drbd_request_endio(). */
        struct bio *private_bio;

        struct drbd_interval i;

        /* epoch: used to check on "completion" whether this req was in
         * the current epoch, and we therefore have to close it,
         * causing a p_barrier packet to be sent, starting a new epoch.
         *
         * This corresponds to "barrier" in struct p_barrier[_ack],
         * and to "barrier_nr" in struct drbd_epoch (and various
         * comments/function parameters/local variable names).
         */
        unsigned int epoch;

        struct list_head tl_requests; /* ring list in the transfer log */
        struct bio *master_bio;       /* master bio pointer */

        /* see struct drbd_device */
        struct list_head req_pending_master_completion;
        struct list_head req_pending_local;

        /* for generic IO accounting */
        unsigned long start_jif;

        /* for DRBD internal statistics */

        /* Minimal set of time stamps to determine if we wait for activity log
         * transactions, local disk or peer.  32 bit "jiffies" are good enough,
         * we don't expect a DRBD request to be stalled for several months.
         */

        /* before actual request processing */
        unsigned long in_actlog_jif;

        /* local disk */
        unsigned long pre_submit_jif;

        /* per connection */
        unsigned long pre_send_jif;
        unsigned long acked_jif;
        unsigned long net_done_jif;

        /* Possibly even more detail to track each phase:
         *  master_completion_jif
         *      how long did it take to complete the master bio
         *      (application visible latency)
         *  allocated_jif
         *      how long the master bio was blocked until we finally allocated
         *      a tracking struct
         *  in_actlog_jif
         *      how long did we wait for activity log transactions
         *
         *  net_queued_jif
         *      when did we finally queue it for sending
         *  pre_send_jif
         *      when did we start sending it
         *  post_send_jif
         *      how long did we block in the network stack trying to send it
         *  acked_jif
         *      when did we receive (or fake, in protocol A) a remote ACK
         *  net_done_jif
         *      when did we receive final acknowledgement (P_BARRIER_ACK),
         *      or decide, e.g. on connection loss, that we do no longer expect
         *      anything from this peer for this request.
         *
         *  pre_submit_jif
         *  post_sub_jif
         *      when did we start submitting to the lower level device,
         *      and how long did we block in that submit function
         *  local_completion_jif
         *      how long did it take the lower level device to complete this request
         */


        /* once it hits 0, we may complete the master_bio */
        atomic_t completion_ref;
        /* once it hits 0, we may destroy this drbd_request object */
        struct kref kref;

        unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
        struct drbd_connection *connection;
        struct list_head list;
        unsigned int barrier_nr;
        atomic_t epoch_size; /* increased on every request added. */
        atomic_t active;     /* increased on every req. added, and dec on every finished. */
        unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
        DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
        EV_PUT,
        EV_GOT_BARRIER_NR,
        EV_BECAME_LAST,
        EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
        int digest_size;
        void *digest;
};

struct drbd_peer_request {
        struct drbd_work w;
        struct drbd_peer_device *peer_device;
        struct drbd_epoch *epoch; /* for writes */
        struct page *pages;
        atomic_t pending_bios;
        struct drbd_interval i;
        /* see comments on ee flag bits below */
        unsigned long flags;
        unsigned long submit_jif;
        union {
                u64 block_id;
                struct digest_info *digest;
        };
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
        __EE_CALL_AL_COMPLETE_IO,
        __EE_MAY_SET_IN_SYNC,

        /* is this a TRIM aka REQ_DISCARD? */
        __EE_IS_TRIM,

        /* In case a barrier failed,
         * we need to resubmit without the barrier flag. */
        __EE_RESUBMITTED,

        /* we may have several bios per peer request.
         * if any of those fail, we set this flag atomically
         * from the endio callback */
        __EE_WAS_ERROR,

        /* This ee has a pointer to a digest instead of a block id */
        __EE_HAS_DIGEST,

        /* Conflicting local requests need to be restarted after this request */
        __EE_RESTART_REQUESTS,

        /* The peer wants a write ACK for this (wire proto C) */
        __EE_SEND_WRITE_ACK,

        /* Is set when net_conf had two_primaries set while creating this peer_req */
        __EE_IN_INTERVAL_TREE,

        /* for debugfs: */
        /* has this been submitted, or does it still wait for something else? */
        __EE_SUBMITTED,

        /* this is/was a write request */
        __EE_WRITE,

        /* this is/was a write same request */
        __EE_WRITE_SAME,

        /* this originates from application on peer
         * (not some resync or verify or other DRBD internal request) */
        __EE_APPLICATION,

        /* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
        __EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM             (1<<__EE_IS_TRIM)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS     (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK       (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE     (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED            (1<<__EE_SUBMITTED)
#define EE_WRITE                (1<<__EE_WRITE)
#define EE_WRITE_SAME           (1<<__EE_WRITE_SAME)
#define EE_APPLICATION          (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ          (1<<__EE_RS_THIN_REQ)

/* flag bits per device */
enum {
        UNPLUG_REMOTE,          /* sending a "UnplugRemote" could help */
        MD_DIRTY,               /* current uuids and flags not yet on disk */
        USE_DEGR_WFC_T,         /* degr-wfc-timeout instead of wfc-timeout. */
        CL_ST_CHG_SUCCESS,
        CL_ST_CHG_FAIL,
        CRASHED_PRIMARY,        /* This node was a crashed primary.
                                 * Gets cleared when the state.conn
                                 * goes into C_CONNECTED state. */
        CONSIDER_RESYNC,

        MD_NO_FUA,              /* User wants us to not use FUA/FLUSH on meta data dev */

        BITMAP_IO,              /* suspend application io;
                                   once no more io in flight, start bitmap io */
        BITMAP_IO_QUEUED,       /* Started bitmap IO */
        WAS_IO_ERROR,           /* Local disk failed, returned IO error */
        WAS_READ_ERROR,         /* Local disk READ failed (set additionally to the above) */
        FORCE_DETACH,           /* Force-detach from local disk, aborting any pending local IO */
        RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
        RESIZE_PENDING,         /* Size change detected locally, waiting for the response from
                                 * the peer, if it changed there as well. */
        NEW_CUR_UUID,           /* Create new current UUID when thawing IO */
        AL_SUSPENDED,           /* Activity logging is currently suspended. */
        AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
        B_RS_H_DONE,            /* Before resync handler done (already executed) */
        DISCARD_MY_DATA,        /* discard_my_data flag per volume */
        READ_BALANCE_RR,

        FLUSH_PENDING,          /* if set, device->flush_jif is when we submitted that flush
                                 * from drbd_flush_after_epoch() */

        /* cleared only after backing device related structures have been destroyed. */
        GOING_DISKLESS,         /* Disk is being detached, because of io-error, or admin request. */

        /* to be used in drbd_device_post_work() */
        GO_DISKLESS,            /* tell worker to schedule cleanup before detach */
        DESTROY_DISK,           /* tell worker to close backing devices and destroy related structures. */
        MD_SYNC,                /* tell worker to call drbd_md_sync() */
        RS_START,               /* tell worker to start resync/OV */
        RS_PROGRESS,            /* tell worker that resync made significant progress */
        RS_DONE,                /* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
        /* currently locked for bulk operation */
        BM_LOCKED_MASK = 0xf,

        /* in detail, that is: */
        BM_DONT_CLEAR = 0x1,
        BM_DONT_SET   = 0x2,
        BM_DONT_TEST  = 0x4,

        /* so we can mark it locked for bulk operation,
         * and still allow all non-bulk operations */
        BM_IS_LOCKED  = 0x8,

        /* (test bit, count bit) allowed (common case) */
        BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

        /* testing bits, as well as setting new bits allowed, but clearing bits
         * would be unexpected.  Used during bitmap receive.  Setting new bits
         * requires sending of "out-of-sync" information, though. */
        BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

        /* for drbd_bm_write_copy_pages, everything is allowed,
         * only concurrent bulk operations are locked out. */
        BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
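
/*
 * Intended use, as a sketch (drbd_bm_lock()/drbd_bm_unlock() live in
 * drbd_bitmap.c): a bulk operation takes the bitmap lock and declares
 * which concurrent single-bit operations remain acceptable, e.g.:
 *
 *        drbd_bm_lock(device, "send_bitmap", BM_LOCKED_TEST_ALLOWED);
 *        ... transfer the bitmap; drbd_bm_test_bit() is still fine ...
 *        drbd_bm_unlock(device);
 *
 * The BM_DONT_* bits are advisory: violations are logged, not blocked.
 */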

struct drbd_work_queue {
        struct list_head q;
        spinlock_t q_lock;  /* to protect the list. */
        wait_queue_head_t q_wait;
};

struct drbd_socket {
        struct mutex mutex;
        struct socket    *socket;
        /* this way we get our
         * send/receive buffers off the stack */
        void *sbuf;
        void *rbuf;
};

struct drbd_md {
        u64 md_offset;          /* sector offset to 'super' block */

        u64 la_size_sect;       /* last agreed size, unit sectors */
        spinlock_t uuid_lock;
        u64 uuid[UI_SIZE];
        u64 device_uuid;
        u32 flags;
        u32 md_size_sect;

        s32 al_offset;  /* signed relative sector offset to activity log */
        s32 bm_offset;  /* signed relative sector offset to bitmap */

        /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
        s32 meta_dev_idx;

        /* see al_tr_number_to_on_disk_sector() */
        u32 al_stripes;
        u32 al_stripe_size_4k;
        u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
        struct block_device *backing_bdev;
        struct block_device *md_bdev;
        struct drbd_md md;
        struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
        sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
        struct page *page;
        unsigned long start_jif;        /* last call to drbd_md_get_buffer */
        unsigned long submit_jif;       /* last _drbd_md_sync_page_io() submit */
        const char *current_use;
        atomic_t in_use;
        unsigned int done;
        int error;
};

struct bm_io_work {
        struct drbd_work w;
        char *why;
        enum bm_flag flags;
        int (*io_fn)(struct drbd_device *device);
        void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
        unsigned int head_index;
        unsigned int size;
        int total; /* sum of all values */
        int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);

/* flag bits per connection */
enum {
        NET_CONGESTED,          /* The data socket is congested */
        RESOLVE_CONFLICTS,      /* Set on one node, cleared on the peer! */
        SEND_PING,
        GOT_PING_ACK,           /* set when we receive a ping_ack packet, ping_wait gets woken */
        CONN_WD_ST_CHG_REQ,     /* A cluster wide state change on the connection is active */
        CONN_WD_ST_CHG_OKAY,
        CONN_WD_ST_CHG_FAIL,
        CONN_DRY_RUN,           /* Expect disconnect after resync handshake. */
        CREATE_BARRIER,         /* next P_DATA is preceded by a P_BARRIER */
        STATE_SENT,             /* Do not change state/UUIDs while this is set */
        CALLBACK_PENDING,       /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
                                 * pending, from drbd worker context.
                                 * If set, bdi_write_congested() returns true,
                                 * so shrink_page_list() would not recurse into,
                                 * and potentially deadlock on, this drbd worker.
                                 */
        DISCONNECT_SENT,

        DEVICE_WORK_PENDING,    /* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
        char *name;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_res;
        struct dentry *debugfs_res_volumes;
        struct dentry *debugfs_res_connections;
        struct dentry *debugfs_res_in_flight_summary;
#endif
        struct kref kref;
        struct idr devices;             /* volume number to device mapping */
        struct list_head connections;
        struct list_head resources;
        struct res_opts res_opts;
        struct mutex conf_update;       /* mutex for read-copy-update of net_conf and disk_conf */
        struct mutex adm_mutex;         /* mutex to serialize administrative requests */
        spinlock_t req_lock;

        unsigned susp:1;                /* IO suspended by user */
        unsigned susp_nod:1;            /* IO suspended because no data */
        unsigned susp_fen:1;            /* IO suspended because fence peer handler runs */

        enum write_ordering_e write_ordering;

        cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
        unsigned long start_jif;
        void *cb_addr;
        const char *caller_fn;
        unsigned int line;
        unsigned int cb_nr;
};

struct drbd_connection {
        struct list_head connections;
        struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_conn;
        struct dentry *debugfs_conn_callback_history;
        struct dentry *debugfs_conn_oldest_requests;
#endif
        struct kref kref;
        struct idr peer_devices;        /* volume number to peer device mapping */
        enum drbd_conns cstate;         /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
        struct mutex cstate_mutex;      /* Protects graceful disconnects */
        unsigned int connect_cnt;       /* Inc each time a connection is established */

        unsigned long flags;
        struct net_conf *net_conf;      /* content protected by rcu */
        wait_queue_head_t ping_wait;    /* Woken upon reception of a ping, and a state change */

        struct sockaddr_storage my_addr;
        int my_addr_len;
        struct sockaddr_storage peer_addr;
        int peer_addr_len;

        struct drbd_socket data;        /* data/barrier/cstate/parameter packets */
        struct drbd_socket meta;        /* ping/ack (metadata) packets */
        int agreed_pro_version;         /* actually used protocol version */
        u32 agreed_features;
        unsigned long last_received;    /* in jiffies, either socket */
        unsigned int ko_count;

        struct list_head transfer_log;  /* all requests not yet fully processed */

        struct crypto_shash *cram_hmac_tfm;
        struct crypto_ahash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
        struct crypto_ahash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
        struct crypto_ahash *csums_tfm;
        struct crypto_ahash *verify_tfm;
        void *int_dig_in;
        void *int_dig_vv;

        /* receiver side */
        struct drbd_epoch *current_epoch;
        spinlock_t epoch_lock;
        unsigned int epochs;
        atomic_t current_tle_nr;        /* transfer log epoch number */
        unsigned current_tle_writes;    /* writes seen within this tl epoch */

        unsigned long last_reconnect_jif;
        /* empty member on older kernels without blk_start_plug() */
        struct blk_plug receiver_plug;
        struct drbd_thread receiver;
        struct drbd_thread worker;
        struct drbd_thread ack_receiver;
        struct workqueue_struct *ack_sender;

        /* cached pointers,
         * so we can look up the oldest pending requests more quickly.
         * protected by resource->req_lock */
        struct drbd_request *req_next; /* DRBD 9: todo.req_next */
        struct drbd_request *req_ack_pending;
        struct drbd_request *req_not_net_done;

        /* sender side */
        struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST        16
        unsigned int w_cb_nr; /* keeps counting up */
        unsigned int r_cb_nr; /* keeps counting up */
        struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
        struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

        struct {
                unsigned long last_sent_barrier_jif;

                /* whether this sender thread
                 * has processed a single write yet. */
                bool seen_any_write_yet;

                /* Which barrier number to send with the next P_BARRIER */
                int current_epoch_nr;

                /* how many write requests have been sent
                 * with req->epoch == current_epoch_nr.
                 * If none, no P_BARRIER will be sent. */
                unsigned current_epoch_writes;
        } send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
        bool has_net_conf;

        rcu_read_lock();
        has_net_conf = rcu_dereference(connection->net_conf);
        rcu_read_unlock();

        return has_net_conf;
}

void __update_timing_details(
                struct drbd_thread_timing_details *tdp,
                unsigned int *cb_nr,
                void *cb,
                const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
        __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
        __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
        struct workqueue_struct *wq;
        struct work_struct worker;

        /* protected by ..->resource->req_lock */
        struct list_head writes;
};

struct drbd_peer_device {
        struct list_head peer_devices;
        struct drbd_device *device;
        struct drbd_connection *connection;
        struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
        struct drbd_resource *resource;
        struct list_head peer_devices;
        struct list_head pending_bitmap_io;

        unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_minor;
        struct dentry *debugfs_vol;
        struct dentry *debugfs_vol_oldest_requests;
        struct dentry *debugfs_vol_act_log_extents;
        struct dentry *debugfs_vol_resync_extents;
        struct dentry *debugfs_vol_data_gen_id;
        struct dentry *debugfs_vol_ed_gen_id;
#endif

        unsigned int vnr;       /* volume number within the connection */
        unsigned int minor;     /* device minor number */

        struct kref kref;

        /* things that are stored as / read from meta data on disk */
        unsigned long flags;

        /* configured by drbdsetup */
        struct drbd_backing_dev *ldev __protected_by(local);

        sector_t p_size;     /* partner's disk size */
        struct request_queue *rq_queue;
        struct block_device *this_bdev;
        struct gendisk      *vdisk;

        unsigned long last_reattach_jif;
        struct drbd_work resync_work;
        struct drbd_work unplug_work;
        struct timer_list resync_timer;
        struct timer_list md_sync_timer;
        struct timer_list start_resync_timer;
        struct timer_list request_timer;

        /* Used after attach while negotiating new disk state. */
        union drbd_state new_state_tmp;

        union drbd_dev_state state;
        wait_queue_head_t misc_wait;
        wait_queue_head_t state_wait;  /* upon each state change. */
        unsigned int send_cnt;
        unsigned int recv_cnt;
        unsigned int read_cnt;
        unsigned int writ_cnt;
        unsigned int al_writ_cnt;
        unsigned int bm_writ_cnt;
        atomic_t ap_bio_cnt;     /* Requests we need to complete */
        atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
        atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
        atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
        atomic_t unacked_cnt;    /* Need to send replies for */
        atomic_t local_cnt;      /* Waiting for local completion */
        atomic_t suspend_cnt;

        /* Interval tree of pending local requests */
        struct rb_root read_requests;
        struct rb_root write_requests;

        /* for statistics and timeouts */
        /* [0] read, [1] write */
        struct list_head pending_master_completion[2];
        struct list_head pending_completion[2];

        /* use checksums for *this* resync */
        bool use_csums;
        /* blocks to resync in this run [unit BM_BLOCK_SIZE] */
        unsigned long rs_total;
        /* number of resync blocks that failed in this run */
        unsigned long rs_failed;
        /* Syncer's start time [unit jiffies] */
        unsigned long rs_start;
        /* cumulated time in PausedSyncX state [unit jiffies] */
        unsigned long rs_paused;
        /* skipped because csum was equal [unit BM_BLOCK_SIZE] */
        unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
        /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
        unsigned long rs_mark_left[DRBD_SYNC_MARKS];
        /* mark's time [unit jiffies] */
        unsigned long rs_mark_time[DRBD_SYNC_MARKS];
        /* current index into rs_mark_{left,time} */
        int rs_last_mark;
        unsigned long rs_last_bcast; /* [unit jiffies] */

        /* where does the admin want us to start? (sector) */
        sector_t ov_start_sector;
        sector_t ov_stop_sector;
        /* where are we now? (sector) */
        sector_t ov_position;
        /* Start sector of out of sync range (to merge printk reporting). */
        sector_t ov_last_oos_start;
        /* size of out-of-sync range in sectors. */
        sector_t ov_last_oos_size;
        unsigned long ov_left; /* in bits */

        struct drbd_bitmap *bitmap;
        unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

        /* Used to track operations of resync... */
        struct lru_cache *resync;
        /* Number of locked elements in resync LRU */
        unsigned int resync_locked;
        /* resync extent number waiting for application requests */
        unsigned int resync_wenr;

        int open_cnt;
        u64 *p_uuid;

        struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
        struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
        struct list_head done_ee;   /* need to send P_WRITE_ACK */
        struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
        struct list_head net_ee;    /* zero-copy network send in progress */

        int next_barrier_nr;
        struct list_head resync_reads;
        atomic_t pp_in_use;             /* allocated from page pool */
        atomic_t pp_in_use_by_net;      /* sendpage()d, still referenced by tcp */
        wait_queue_head_t ee_wait;
        struct drbd_md_io md_io;
        spinlock_t al_lock;
        wait_queue_head_t al_wait;
        struct lru_cache *act_log;      /* activity log */
        unsigned int al_tr_number;
        int al_tr_cycle;
        wait_queue_head_t seq_wait;
        atomic_t packet_seq;
        unsigned int peer_seq;
        spinlock_t peer_seq_lock;
        unsigned long comm_bm_set; /* communicated number of set bits. */
        struct bm_io_work bm_io_work;
        u64 ed_uuid; /* UUID of the exposed data */
        struct mutex own_state_mutex;
        struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
        char congestion_reason;  /* Why we were congested... */
        atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
        atomic_t rs_sect_ev; /* for submitted resync data rate, both */
        int rs_last_sect_ev; /* counter to compare with */
        int rs_last_events;  /* counter of read or write "events" (unit sectors)
                              * on the lower level device when we last looked. */
        int c_sync_rate; /* current resync rate after syncer throttle magic */
        struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, resource->conf_update) */
        int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
        atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
        unsigned int peer_max_bio_size;
        unsigned int local_max_bio_size;

        /* any requests that would block in drbd_make_request()
         * are deferred to this single-threaded work queue */
        struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
        struct drbd_device *device;
        struct list_head list; /* on device->pending_bitmap_io */
        unsigned long start_jif;
        atomic_t in_flight;
        unsigned int done;
        unsigned flags;
#define BM_AIO_COPY_PAGES       1
#define BM_AIO_WRITE_HINTED     2
#define BM_AIO_WRITE_ALL_PAGES  4
#define BM_AIO_READ             8
        int error;
        struct kref kref;
};

struct drbd_config_context {
        /* assigned from drbd_genlmsghdr */
        unsigned int minor;
        /* assigned from request attributes, if present */
        unsigned int volume;
#define VOLUME_UNSPECIFIED              (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
        char *resource_name;
        struct nlattr *my_addr;
        struct nlattr *peer_addr;

        /* reply buffer */
        struct sk_buff *reply_skb;
        /* pointer into reply buffer */
        struct drbd_genlmsghdr *reply_dh;
        /* resolved from attributes, if possible */
        struct drbd_device *device;
        struct drbd_resource *resource;
        struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
        return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
        return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
        return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
        list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
        list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
        list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
        list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
        list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
        list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
        list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
        list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
        list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
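
/*
 * Iteration sketch (illustrative): the _rcu variants must run inside an RCU
 * read-side critical section, e.g.:
 *
 *        rcu_read_lock();
 *        for_each_resource_rcu(resource, &drbd_resources)
 *                pr_info("drbd resource %s\n", resource->name);
 *        rcu_read_unlock();
 *
 * The _safe variants allow removing the current entry while iterating; the
 * plain variants rely on the caller holding an appropriate lock.
 */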

static inline unsigned int device_to_minor(struct drbd_device *device)
{
        return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
        DDSF_FORCED    = 1,
        DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
                       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
                     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
                         unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
                            u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
                         struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
                             struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
                             struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
                            sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
                           struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
                              sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
                                   int size, void *digest, int digest_size,
                                   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
                                 int (*io_fn)(struct drbd_device *),
                                 void (*done)(struct drbd_device *, int),
                                 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
                int (*io_fn)(struct drbd_device *),
                char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
                int (*io_fn)(struct drbd_device *),
                char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 *  Variants:
 *     old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its total size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT      8
#define MD_32kB_SECT    64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *   This many changes to the active set can be logged with one transaction.
 *   This number is arbitrary.
 * context per transaction:
 *   This many context extent numbers are logged with each transaction.
 *   This number results from the transaction block size (4k), the layout
 *   of the transaction header, and the number of updates per transaction.
 *   See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION       64     // arbitrary
#define AL_CONTEXT_PER_TRANSACTION      919     // (4096 - 36 - 6*64)/4

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
        int rs_left; /* number of bits set (out of sync) in this extent. */
        int rs_failed; /* number of failed resync requests in this extent. */
        unsigned long flags;
        struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT  12                       /* 4k per bit */
#define BM_BLOCK_SIZE    (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT     24     /* 16 MiB per resync extent */
#define BM_EXT_SIZE      (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how many _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)
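
/*
 * Worked numbers (illustrative): one bit covers 4 KiB == 8 sectors, so
 * BM_SECT_TO_BIT(16) == 2 and BM_BIT_TO_SECT(2) == 16; Bit2KB(1) == 4.
 * One 16 MiB bitmap extent covers BM_BITS_PER_EXT == 4096 bits, i.e.
 * BM_SECT_PER_EXT == 32768 storage sectors.
 */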
1288
1289
1290/* in one sector of the bitmap, we have this many activity_log extents. */
1291#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1292
1293/* the extent in "PER_EXTENT" below is an activity log extent
1294 * we need that many (long words/bytes) to store the bitmap
1295 *                   of one AL_EXTENT_SIZE chunk of storage.
1296 * we can store the bitmap for that many AL_EXTENTS within
1297 * one sector of the _on_disk_ bitmap:
1298 * bit   0        bit 37   bit 38            bit (512*8)-1
1299 *           ...|........|........|.. // ..|........|
1300 * sect. 0       `296     `304                     ^(512*8*8)-1
1301 *
1302#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1303#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
1304#define BM_EXT_PER_SECT     ( 512 / BM_BYTES_PER_EXTENT )        //   4
1305 */
1306
1307#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1308/* we have a certain meta data variant that has a fixed on-disk size of 128
1309 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
1310 * log, leaving this many sectors for the bitmap.
1311 */
1312
1313#define DRBD_MAX_SECTORS_FIXED_BM \
1314          ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1315#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1316#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
1317#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1318#else
1319#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
1320/* the 32bit DRBD_MAX_SECTORS_FLEX below works out to 16 TiB, in units of sectors */
1321#if BITS_PER_LONG == 32
1322/* adjust by one page worth of bitmap,
1323 * so we won't wrap around in drbd_bm_find_next_bit.
1324 * you should use a 64bit OS for that much storage, anyway. */
1325#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1326#else
1327/* we allow up to 1 PiB now on 64bit architectures with "flexible" meta data */
1328#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1329/* 1 PiB corresponds to (1UL << 38) bits at 4k per bit; the constant above is a deliberately generous clamp, the meta data layout enforces the effective limit. */
1330#endif
1331#endif
1332
1333/* Estimate max bio size as 256 * PAGE_SIZE,
1334 * so for a typical PAGE_SIZE of 4k, that is (1<<20) bytes.
1335 * Since we may live in a mixed-platform cluster,
1336 * we limit ourselves to a platform-agnostic constant here for now.
1337 * A followup commit may allow even bigger BIO sizes,
1338 * once we have thought that through. */
1339#define DRBD_MAX_BIO_SIZE (1U << 20)
1340#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
1341#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_PAGES * PAGE_SIZE
1342#endif
1343#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* always works: 4k */
1344
1345#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
1346#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocols 95 to 99 allow bios up to 128KiB */
1347
1348/* For now, don't allow more than half of what we can "activate" in one
1349 * activity log transaction to be discarded in one go. We may need to rework
1350 * drbd_al_begin_io() to allow for even larger discard ranges. */
1351#define DRBD_MAX_BATCH_BIO_SIZE  (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1352#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
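/* Worked numbers (assuming AL_UPDATES_PER_TRANSACTION = 64 and an
 * AL_EXTENT_SIZE of 4 MiB, as defined earlier in this header):
 * 64/2 * 4 MiB = 128 MiB per discard batch, i.e. 262144 sectors. */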
1353
1354extern int  drbd_bm_init(struct drbd_device *device);
1355extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1356extern void drbd_bm_cleanup(struct drbd_device *device);
1357extern void drbd_bm_set_all(struct drbd_device *device);
1358extern void drbd_bm_clear_all(struct drbd_device *device);
1359/* set/clear/test only a few bits at a time */
1360extern int  drbd_bm_set_bits(
1361                struct drbd_device *device, unsigned long s, unsigned long e);
1362extern int  drbd_bm_clear_bits(
1363                struct drbd_device *device, unsigned long s, unsigned long e);
1364extern int drbd_bm_count_bits(
1365        struct drbd_device *device, const unsigned long s, const unsigned long e);
1366/* bm_set_bits variant for use while holding drbd_bm_lock,
1367 * may process the whole bitmap in one go */
1368extern void _drbd_bm_set_bits(struct drbd_device *device,
1369                const unsigned long s, const unsigned long e);
1370extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1371extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1372extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
1373extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1374extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
1375extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1376extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1377extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1378extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1379extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1380extern size_t        drbd_bm_words(struct drbd_device *device);
1381extern unsigned long drbd_bm_bits(struct drbd_device *device);
1382extern sector_t      drbd_bm_capacity(struct drbd_device *device);
1383
1384#define DRBD_END_OF_BITMAP      (~(unsigned long)0)
1385extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1386/* bm_find_next variants for use while you hold drbd_bm_lock() */
1387extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1388extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1389extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1390extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1391/* for receive_bitmap */
1392extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1393                size_t number, unsigned long *buffer);
1394/* for _drbd_send_bitmap */
1395extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1396                size_t number, unsigned long *buffer);
1397
1398extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1399extern void drbd_bm_unlock(struct drbd_device *device);
1400/* drbd_main.c */
1401
1402extern struct kmem_cache *drbd_request_cache;
1403extern struct kmem_cache *drbd_ee_cache;        /* peer requests */
1404extern struct kmem_cache *drbd_bm_ext_cache;    /* bitmap extents */
1405extern struct kmem_cache *drbd_al_ext_cache;    /* activity log extents */
1406extern mempool_t drbd_request_mempool;
1407extern mempool_t drbd_ee_mempool;
1408
1409/* drbd's page pool, used to buffer data received from the peer,
1410 * or data requested by the peer.
1411 *
1412 * This does not have an emergency reserve.
1413 *
1414 * When allocating from this pool, it first takes pages from the pool.
1415 * Only if the pool is depleted will it try to allocate from the system.
1416 *
1417 * The assumption is that pages taken from this pool will be processed,
1418 * and given back, "quickly", and then can be recycled, so we can avoid
1419 * frequent calls to alloc_page(), and will still be able to make progress
1420 * even under memory pressure.
1421 */
1422extern struct page *drbd_pp_pool;
1423extern spinlock_t   drbd_pp_lock;
1424extern int          drbd_pp_vacant;
1425extern wait_queue_head_t drbd_pp_wait;
1426
1427/* We also need a standard (emergency-reserve backed) page pool
1428 * for meta data IO (activity log, bitmap).
1429 * We can keep it global, as long as it is used as "N pages at a time".
1430 * 128 should be plenty; currently we could probably get away with as few as one.
1431 */
1432#define DRBD_MIN_POOL_PAGES     128
1433extern mempool_t drbd_md_io_page_pool;
1434
1435/* We also need to make sure we get a bio
1436 * when we need it for housekeeping purposes */
1437extern struct bio_set drbd_md_io_bio_set;
1438/* to allocate from that set */
1439extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1440
1441/* And a bio_set for cloning */
1442extern struct bio_set drbd_io_bio_set;
1443
1444extern struct mutex resources_mutex;
1445
1446extern int conn_lowest_minor(struct drbd_connection *connection);
1447extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1448extern void drbd_destroy_device(struct kref *kref);
1449extern void drbd_delete_device(struct drbd_device *device);
1450
1451extern struct drbd_resource *drbd_create_resource(const char *name);
1452extern void drbd_free_resource(struct drbd_resource *resource);
1453
1454extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1455extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1456extern void drbd_destroy_connection(struct kref *kref);
1457extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1458                                            void *peer_addr, int peer_addr_len);
1459extern struct drbd_resource *drbd_find_resource(const char *name);
1460extern void drbd_destroy_resource(struct kref *kref);
1461extern void conn_free_crypto(struct drbd_connection *connection);
1462
1463/* drbd_req.c */
1464extern void do_submit(struct work_struct *ws);
1465extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1466extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1467extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1468extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1469
1470
1471/* drbd_nl.c */
1472
1473extern struct mutex notification_mutex;
1474
1475extern void drbd_suspend_io(struct drbd_device *device);
1476extern void drbd_resume_io(struct drbd_device *device);
1477extern char *ppsize(char *buf, unsigned long long size);
1478extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1479enum determine_dev_size {
1480        DS_ERROR_SHRINK = -3,
1481        DS_ERROR_SPACE_MD = -2,
1482        DS_ERROR = -1,
1483        DS_UNCHANGED = 0,
1484        DS_SHRUNK = 1,
1485        DS_GREW = 2,
1486        DS_GREW_FROM_ZERO = 3,
1487};
1488extern enum determine_dev_size
1489drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1490extern void resync_after_online_grow(struct drbd_device *);
1491extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1492                        struct drbd_backing_dev *bdev, struct o_qlim *o);
1493extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1494                                        enum drbd_role new_role,
1495                                        int force);
1496extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1497extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1498extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1499extern int drbd_khelper(struct drbd_device *device, char *cmd);
1500
1501/* drbd_worker.c */
1502/* bi_end_io handlers */
1503extern void drbd_md_endio(struct bio *bio);
1504extern void drbd_peer_request_endio(struct bio *bio);
1505extern void drbd_request_endio(struct bio *bio);
1506extern int drbd_worker(struct drbd_thread *thi);
1507enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1508void drbd_resync_after_changed(struct drbd_device *device);
1509extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1510extern void resume_next_sg(struct drbd_device *device);
1511extern void suspend_other_sg(struct drbd_device *device);
1512extern int drbd_resync_finished(struct drbd_device *device);
1513/* maybe rather drbd_main.c ? */
1514extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1515extern void drbd_md_put_buffer(struct drbd_device *device);
1516extern int drbd_md_sync_page_io(struct drbd_device *device,
1517                struct drbd_backing_dev *bdev, sector_t sector, int op);
1518extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1519extern void wait_until_done_or_force_detached(struct drbd_device *device,
1520                struct drbd_backing_dev *bdev, unsigned int *done);
1521extern void drbd_rs_controller_reset(struct drbd_device *device);
1522
1523static inline void ov_out_of_sync_print(struct drbd_device *device)
1524{
1525        if (device->ov_last_oos_size) {
1526                drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1527                     (unsigned long long)device->ov_last_oos_start,
1528                     (unsigned long)device->ov_last_oos_size);
1529        }
1530        device->ov_last_oos_size = 0;
1531}
1532
1533
1534extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
1535extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
1536/* worker callbacks */
1537extern int w_e_end_data_req(struct drbd_work *, int);
1538extern int w_e_end_rsdata_req(struct drbd_work *, int);
1539extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1540extern int w_e_end_ov_reply(struct drbd_work *, int);
1541extern int w_e_end_ov_req(struct drbd_work *, int);
1542extern int w_ov_finished(struct drbd_work *, int);
1543extern int w_resync_timer(struct drbd_work *, int);
1544extern int w_send_write_hint(struct drbd_work *, int);
1545extern int w_send_dblock(struct drbd_work *, int);
1546extern int w_send_read_req(struct drbd_work *, int);
1547extern int w_e_reissue(struct drbd_work *, int);
1548extern int w_restart_disk_io(struct drbd_work *, int);
1549extern int w_send_out_of_sync(struct drbd_work *, int);
1550extern int w_start_resync(struct drbd_work *, int);
1551
1552extern void resync_timer_fn(struct timer_list *t);
1553extern void start_resync_timer_fn(struct timer_list *t);
1554
1555extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1556
1557/* drbd_receiver.c */
1558extern int drbd_receiver(struct drbd_thread *thi);
1559extern int drbd_ack_receiver(struct drbd_thread *thi);
1560extern void drbd_send_ping_wf(struct work_struct *ws);
1561extern void drbd_send_acks_wf(struct work_struct *ws);
1562extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1563extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1564                bool throttle_if_app_is_waiting);
1565extern int drbd_submit_peer_request(struct drbd_device *,
1566                                    struct drbd_peer_request *, const unsigned,
1567                                    const unsigned, const int);
1568extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1569extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1570                                                     sector_t, unsigned int,
1571                                                     unsigned int,
1572                                                     gfp_t) __must_hold(local);
1573extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1574                                 int);
1575#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1576#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1577extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1578extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1579extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1580extern int drbd_connected(struct drbd_peer_device *);
1581
1582static inline void drbd_tcp_cork(struct socket *sock)
1583{
1584        int val = 1;
1585        (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1586                        (char*)&val, sizeof(val));
1587}
1588
1589static inline void drbd_tcp_uncork(struct socket *sock)
1590{
1591        int val = 0;
1592        (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1593                        (char*)&val, sizeof(val));
1594}
1595
1596static inline void drbd_tcp_nodelay(struct socket *sock)
1597{
1598        int val = 1;
1599        (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1600                        (char*)&val, sizeof(val));
1601}
1602
1603static inline void drbd_tcp_quickack(struct socket *sock)
1604{
1605        int val = 2;
1606        (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1607                        (char*)&val, sizeof(val));
1608}
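
/* A sketch of how the corking helpers above are meant to be paired
 * (hypothetical function, not part of the driver): batch several small
 * sends into fewer TCP segments. */
static inline void drbd_example_corked_send(struct socket *sock)
{
        drbd_tcp_cork(sock);    /* hold back partial frames */
        /* ... issue several small kernel_sendmsg() calls here ... */
        drbd_tcp_uncork(sock);  /* push out everything queued, at once */
}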
1609
1610/* sets the number of 512 byte sectors of our virtual device */
1611static inline void drbd_set_my_capacity(struct drbd_device *device,
1612                                        sector_t size)
1613{
1614        /* set_capacity(device->this_bdev->bd_disk, size); */
1615        set_capacity(device->vdisk, size);
1616        device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1617}
1618
1619/*
1620 * used to submit our private bio
1621 */
1622static inline void drbd_generic_make_request(struct drbd_device *device,
1623                                             int fault_type, struct bio *bio)
1624{
1625        __release(local);
1626        if (!bio->bi_disk) {
1627                drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
1628                bio->bi_status = BLK_STS_IOERR;
1629                bio_endio(bio);
1630                return;
1631        }
1632
1633        if (drbd_insert_fault(device, fault_type))
1634                bio_io_error(bio);
1635        else
1636                generic_make_request(bio);
1637}
1638
1639void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1640                              enum write_ordering_e wo);
1641
1642/* drbd_proc.c */
1643extern struct proc_dir_entry *drbd_proc;
1644int drbd_seq_show(struct seq_file *seq, void *v);
1645
1646/* drbd_actlog.c */
1647extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1648extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1649extern void drbd_al_begin_io_commit(struct drbd_device *device);
1650extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1651extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1652extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1653extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1654extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1655extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1656extern void drbd_rs_cancel_all(struct drbd_device *device);
1657extern int drbd_rs_del_all(struct drbd_device *device);
1658extern void drbd_rs_failed_io(struct drbd_device *device,
1659                sector_t sector, int size);
1660extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1661
1662enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1663extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1664                enum update_sync_bits_mode mode);
1665#define drbd_set_in_sync(device, sector, size) \
1666        __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1667#define drbd_set_out_of_sync(device, sector, size) \
1668        __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1669#define drbd_rs_failed_io(device, sector, size) \
1670        __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
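
/* Hypothetical use of the wrappers above: once the peer has acknowledged a
 * replicated write, clear the out-of-sync bits for that range. */
static inline void drbd_example_write_acked(struct drbd_device *device,
                                            sector_t sector, int size)
{
        drbd_set_in_sync(device, sector, size);
}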
1671extern void drbd_al_shrink(struct drbd_device *device);
1672extern int drbd_al_initialize(struct drbd_device *, void *);
1673
1674/* drbd_nl.c */
1675/* state info broadcast */
1676struct sib_info {
1677        enum drbd_state_info_bcast_reason sib_reason;
1678        union {
1679                struct {
1680                        char *helper_name;
1681                        unsigned helper_exit_code;
1682                };
1683                struct {
1684                        union drbd_state os;
1685                        union drbd_state ns;
1686                };
1687        };
1688};
1689void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
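
/* Hypothetical call site for the broadcast above, assuming the
 * SIB_HELPER_POST reason code from <linux/drbd.h>: report the exit code
 * of a user-mode helper to interested listeners. */
static inline void drbd_example_bcast_helper_exit(struct drbd_device *device,
                                                  char *name, unsigned int code)
{
        struct sib_info sib = {
                .sib_reason = SIB_HELPER_POST,
                .helper_name = name,
                .helper_exit_code = code,
        };

        drbd_bcast_event(device, &sib);
}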
1690
1691extern void notify_resource_state(struct sk_buff *,
1692                                  unsigned int,
1693                                  struct drbd_resource *,
1694                                  struct resource_info *,
1695                                  enum drbd_notification_type);
1696extern void notify_device_state(struct sk_buff *,
1697                                unsigned int,
1698                                struct drbd_device *,
1699                                struct device_info *,
1700                                enum drbd_notification_type);
1701extern void notify_connection_state(struct sk_buff *,
1702                                    unsigned int,
1703                                    struct drbd_connection *,
1704                                    struct connection_info *,
1705                                    enum drbd_notification_type);
1706extern void notify_peer_device_state(struct sk_buff *,
1707                                     unsigned int,
1708                                     struct drbd_peer_device *,
1709                                     struct peer_device_info *,
1710                                     enum drbd_notification_type);
1711extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1712                          struct drbd_connection *, const char *, int);
1713
1714/*
1715 * inline helper functions
1716 *************************/
1717
1718/* see also page_chain_add and friends in drbd_receiver.c */
1719static inline struct page *page_chain_next(struct page *page)
1720{
1721        return (struct page *)page_private(page);
1722}
1723#define page_chain_for_each(page) \
1724        for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1725                        page = page_chain_next(page))
1726#define page_chain_for_each_safe(page, n) \
1727        for (; page && ({ n = page_chain_next(page); 1; }); page = n)
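
/* Illustrative sketch (hypothetical helper): tearing a chain down needs the
 * _safe variant above, because each page's private link is consumed as we go. */
static inline void drbd_example_free_page_chain(struct page *page)
{
        struct page *n;

        page_chain_for_each_safe(page, n) {
                set_page_private(page, 0);
                put_page(page);
        }
}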
1728
1729
1730static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1731{
1732        struct page *page = peer_req->pages;
1733        page_chain_for_each(page) {
1734                if (page_count(page) > 1)
1735                        return 1;
1736        }
1737        return 0;
1738}
1739
1740static inline union drbd_state drbd_read_state(struct drbd_device *device)
1741{
1742        struct drbd_resource *resource = device->resource;
1743        union drbd_state rv;
1744
1745        rv.i = device->state.i;
1746        rv.susp = resource->susp;
1747        rv.susp_nod = resource->susp_nod;
1748        rv.susp_fen = resource->susp_fen;
1749
1750        return rv;
1751}
1752
1753enum drbd_force_detach_flags {
1754        DRBD_READ_ERROR,
1755        DRBD_WRITE_ERROR,
1756        DRBD_META_IO_ERROR,
1757        DRBD_FORCE_DETACH,
1758};
1759
1760#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1761static inline void __drbd_chk_io_error_(struct drbd_device *device,
1762                enum drbd_force_detach_flags df,
1763                const char *where)
1764{
1765        enum drbd_io_error_p ep;
1766
1767        rcu_read_lock();
1768        ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1769        rcu_read_unlock();
1770        switch (ep) {
1771        case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1772                if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1773                        if (__ratelimit(&drbd_ratelimit_state))
1774                                drbd_err(device, "Local IO failed in %s.\n", where);
1775                        if (device->state.disk > D_INCONSISTENT)
1776                                _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1777                        break;
1778                }
1779                /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1780        case EP_DETACH:
1781        case EP_CALL_HELPER:
1782                /* Remember whether we saw a READ or WRITE error.
1783                 *
1784                 * Recovery of the affected area for WRITE failure is covered
1785                 * by the activity log.
1786                 * READ errors may fall outside that area though. Certain READ
1787                 * errors can be "healed" by writing good data to the affected
1788                 * blocks, which triggers block re-allocation in lower layers.
1789                 *
1790                 * If we can not write the bitmap after a READ error,
1791                 * we may need to trigger a full sync (see w_go_diskless()).
1792                 *
1793                 * Force-detach is not really an IO error, but rather a
1794                 * desperate measure to try to deal with a completely
1795                 * unresponsive lower level IO stack.
1796                 * Still it should be treated as a WRITE error.
1797                 *
1798                 * Meta IO error is always WRITE error:
1799                 * we read meta data only once during attach,
1800                 * which will fail in case of errors.
1801                 */
1802                set_bit(WAS_IO_ERROR, &device->flags);
1803                if (df == DRBD_READ_ERROR)
1804                        set_bit(WAS_READ_ERROR, &device->flags);
1805                if (df == DRBD_FORCE_DETACH)
1806                        set_bit(FORCE_DETACH, &device->flags);
1807                if (device->state.disk > D_FAILED) {
1808                        _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1809                        drbd_err(device,
1810                                "Local IO failed in %s. Detaching...\n", where);
1811                }
1812                break;
1813        }
1814}
1815
1816/**
1817 * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
1818 * @device:      DRBD device.
1819 * @error:       Error code passed to the IO completion callback
1820 * @forcedetach: Force detach, i.e. the error happened while accessing the meta data
1821 *
1822 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1823 */
1824#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1825static inline void drbd_chk_io_error_(struct drbd_device *device,
1826        int error, enum drbd_force_detach_flags forcedetach, const char *where)
1827{
1828        if (error) {
1829                unsigned long flags;
1830                spin_lock_irqsave(&device->resource->req_lock, flags);
1831                __drbd_chk_io_error_(device, forcedetach, where);
1832                spin_unlock_irqrestore(&device->resource->req_lock, flags);
1833        }
1834}
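
/* Hypothetical completion-handler call site for drbd_chk_io_error() above
 * (sketch only; the real bi_end_io handlers live in drbd_worker.c):
 * hand a failed data WRITE to the configured on_io_error policy. */
static inline void drbd_example_data_write_done(struct drbd_device *device,
                                                struct bio *bio)
{
        if (bio->bi_status)
                drbd_chk_io_error(device, blk_status_to_errno(bio->bi_status),
                                  DRBD_WRITE_ERROR);
}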
1835
1836
1837/**
1838 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1839 * @bdev:       Meta data block device.
1840 *
1841 * BTW, for internal meta data, this happens to be the maximum capacity
1842 * we could agree upon with our peer node.
1843 */
1844static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1845{
1846        switch (bdev->md.meta_dev_idx) {
1847        case DRBD_MD_INDEX_INTERNAL:
1848        case DRBD_MD_INDEX_FLEX_INT:
1849                return bdev->md.md_offset + bdev->md.bm_offset;
1850        case DRBD_MD_INDEX_FLEX_EXT:
1851        default:
1852                return bdev->md.md_offset;
1853        }
1854}
1855
1856/**
1857 * drbd_md_last_sector() - Return the last sector number of the meta data area
1858 * @bdev:       Meta data block device.
1859 */
1860static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1861{
1862        switch (bdev->md.meta_dev_idx) {
1863        case DRBD_MD_INDEX_INTERNAL:
1864        case DRBD_MD_INDEX_FLEX_INT:
1865                return bdev->md.md_offset + MD_4kB_SECT -1;
1866        case DRBD_MD_INDEX_FLEX_EXT:
1867        default:
1868                return bdev->md.md_offset + bdev->md.md_size_sect -1;
1869        }
1870}
1871
1872/* Returns the number of 512 byte sectors of the device */
1873static inline sector_t drbd_get_capacity(struct block_device *bdev)
1874{
1875        /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1876        return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1877}
1878
1879/**
1880 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1881 * @bdev:       Meta data block device.
1882 *
1883 * Returns the capacity we announce to our peer.  We clip ourselves at the
1884 * various MAX_SECTORS, because otherwise the current implementation will
1885 * oops sooner or later.
1886 */
1887static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1888{
1889        sector_t s;
1890
1891        switch (bdev->md.meta_dev_idx) {
1892        case DRBD_MD_INDEX_INTERNAL:
1893        case DRBD_MD_INDEX_FLEX_INT:
1894                s = drbd_get_capacity(bdev->backing_bdev)
1895                        ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1896                                drbd_md_first_sector(bdev))
1897                        : 0;
1898                break;
1899        case DRBD_MD_INDEX_FLEX_EXT:
1900                s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1901                                drbd_get_capacity(bdev->backing_bdev));
1902                /* clip at maximum size the meta device can support */
1903                s = min_t(sector_t, s,
1904                        BM_EXT_TO_SECT(bdev->md.md_size_sect
1905                                     - bdev->md.bm_offset));
1906                break;
1907        default:
1908                s = min_t(sector_t, DRBD_MAX_SECTORS,
1909                                drbd_get_capacity(bdev->backing_bdev));
1910        }
1911        return s;
1912}
1913
1914/**
1915 * drbd_md_ss() - Return the sector number of our meta data super block
1916 * @bdev:       Meta data block device.
1917 */
1918static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1919{
1920        const int meta_dev_idx = bdev->md.meta_dev_idx;
1921
1922        if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1923                return 0;
1924
1925        /* Since drbd08, internal meta data is always "flexible".
1926         * Its position: the last 4k-aligned block of 4k size. */
1927        if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1928            meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1929                return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1930
1931        /* external, some index; this is the old fixed size layout */
1932        return MD_128MB_SECT * bdev->md.meta_dev_idx;
1933}
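
/* Worked example for drbd_md_ss() above (illustrative): with internal meta
 * data on a backing device of 0x1000001 sectors, the superblock lands at
 * (0x1000001 & ~7ULL) - 8 = 0xfffff8, i.e. in the last 4k-aligned 4k block. */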
1934
1935static inline void
1936drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1937{
1938        unsigned long flags;
1939        spin_lock_irqsave(&q->q_lock, flags);
1940        list_add_tail(&w->list, &q->q);
1941        spin_unlock_irqrestore(&q->q_lock, flags);
1942        wake_up(&q->q_wait);
1943}
1944
1945static inline void
1946drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1947{
1948        unsigned long flags;
1949        spin_lock_irqsave(&q->q_lock, flags);
1950        if (list_empty_careful(&w->list))
1951                list_add_tail(&w->list, &q->q);
1952        spin_unlock_irqrestore(&q->q_lock, flags);
1953        wake_up(&q->q_wait);
1954}
1955
1956static inline void
1957drbd_device_post_work(struct drbd_device *device, int work_bit)
1958{
1959        if (!test_and_set_bit(work_bit, &device->flags)) {
1960                struct drbd_connection *connection =
1961                        first_peer_device(device)->connection;
1962                struct drbd_work_queue *q = &connection->sender_work;
1963                if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1964                        wake_up(&q->q_wait);
1965        }
1966}
1967
1968extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1969
1970/* To get the ack_receiver out of the blocking network stack,
1971 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1972 * and send a ping, we need to send a signal.
1973 * Which signal we send is irrelevant. */
1974static inline void wake_ack_receiver(struct drbd_connection *connection)
1975{
1976        struct task_struct *task = connection->ack_receiver.task;
1977        if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1978                force_sig(SIGXCPU, task);
1979}
1980
1981static inline void request_ping(struct drbd_connection *connection)
1982{
1983        set_bit(SEND_PING, &connection->flags);
1984        wake_ack_receiver(connection);
1985}
1986
1987extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1988extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1989extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1990                             enum drbd_packet, unsigned int, void *,
1991                             unsigned int);
1992extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1993                             enum drbd_packet, unsigned int, void *,
1994                             unsigned int);
1995
1996extern int drbd_send_ping(struct drbd_connection *connection);
1997extern int drbd_send_ping_ack(struct drbd_connection *connection);
1998extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1999extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
2000
2001static inline void drbd_thread_stop(struct drbd_thread *thi)
2002{
2003        _drbd_thread_stop(thi, false, true);
2004}
2005
2006static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
2007{
2008        _drbd_thread_stop(thi, false, false);
2009}
2010
2011static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
2012{
2013        _drbd_thread_stop(thi, true, false);
2014}
2015
2016/* counts how many answer packets we expect from our peer,
2017 * for either explicit application requests,
2018 * or implicit barrier packets as necessary.
2019 * increased:
2020 *  w_send_barrier
2021 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
2022 *    it is much easier and equally valid to count what we queue for the
2023 *    worker, even before it actually was queued or sent.
2024 *    (drbd_make_request_common; recovery path on read io-error)
2025 * decreased:
2026 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
2027 *  _req_mod(req, DATA_RECEIVED)
2028 *     [from receive_DataReply]
2029 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
2030 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
2031 *     for some reason it is NOT decreased in got_NegAck,
2032 *     but in the resulting cleanup code from report_params.
2033 *     we should try to remember the reason for that...
2034 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
2035 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
2036 *     [from tl_clear_barrier]
2037 */
2038static inline void inc_ap_pending(struct drbd_device *device)
2039{
2040        atomic_inc(&device->ap_pending_cnt);
2041}
2042
2043#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)                       \
2044        if (atomic_read(&device->which) < 0)                            \
2045                drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",   \
2046                        func, line,                                     \
2047                        atomic_read(&device->which))
2048
2049#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2050static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2051{
2052        if (atomic_dec_and_test(&device->ap_pending_cnt))
2053                wake_up(&device->misc_wait);
2054        ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2055}
2056
2057/* counts how many resync-related answers we still expect from the peer
2058 *                   increase                   decrease
2059 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
2060 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
2061 *                                         (or P_NEG_ACK with ID_SYNCER)
2062 */
2063static inline void inc_rs_pending(struct drbd_device *device)
2064{
2065        atomic_inc(&device->rs_pending_cnt);
2066}
2067
2068#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2069static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2070{
2071        atomic_dec(&device->rs_pending_cnt);
2072        ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2073}
2074
2075/* counts how many answers we still need to send to the peer.
2076 * increased on
2077 *  receive_Data        unless protocol A;
2078 *                      we need to send a P_RECV_ACK (proto B)
2079 *                      or P_WRITE_ACK (proto C)
2080 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
2081 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
2082 *  receive_Barrier_*   we need to send a P_BARRIER_ACK
2083 */
2084static inline void inc_unacked(struct drbd_device *device)
2085{
2086        atomic_inc(&device->unacked_cnt);
2087}
2088
2089#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2090static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2091{
2092        atomic_dec(&device->unacked_cnt);
2093        ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2094}
2095
2096#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2097static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2098{
2099        atomic_sub(n, &device->unacked_cnt);
2100        ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2101}
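
/* Hypothetical receive-path sketch for the unacked counter above: a
 * protocol C write stays accounted until its P_WRITE_ACK went out. */
static inline void drbd_example_recv_protoc_write(struct drbd_device *device)
{
        inc_unacked(device);
        /* ... submit the peer request; the ack sender later calls
         * dec_unacked(device) after sending P_WRITE_ACK ... */
}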
2102
2103static inline bool is_sync_target_state(enum drbd_conns connection_state)
2104{
2105        return  connection_state == C_SYNC_TARGET ||
2106                connection_state == C_PAUSED_SYNC_T;
2107}
2108
2109static inline bool is_sync_source_state(enum drbd_conns connection_state)
2110{
2111        return  connection_state == C_SYNC_SOURCE ||
2112                connection_state == C_PAUSED_SYNC_S;
2113}
2114
2115static inline bool is_sync_state(enum drbd_conns connection_state)
2116{
2117        return  is_sync_source_state(connection_state) ||
2118                is_sync_target_state(connection_state);
2119}
2120
2121/**
2122 * get_ldev_if_state() - Increase the ref count on device->ldev. Returns false if there is no ldev
2123 * @_device:            DRBD device.
2124 * @_min_state:         Minimum device state required for success.
2125 *
2126 * You have to call put_ldev() when finished working with device->ldev.
2127 */
2128#define get_ldev_if_state(_device, _min_state)                          \
2129        (_get_ldev_if_state((_device), (_min_state)) ?                  \
2130         ({ __acquire(x); true; }) : false)
2131#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2132
2133static inline void put_ldev(struct drbd_device *device)
2134{
2135        enum drbd_disk_state disk_state = device->state.disk;
2136        /* We must check the state *before* the atomic_dec becomes visible,
2137         * or we have a theoretical race where someone hitting zero,
2138         * while the state is still D_FAILED, would then see D_DISKLESS in the
2139         * condition below and call into destroy, which it must not do yet. */
2140        int i = atomic_dec_return(&device->local_cnt);
2141
2142        /* This may be called from some endio handler,
2143         * so we must not sleep here. */
2144
2145        __release(local);
2146        D_ASSERT(device, i >= 0);
2147        if (i == 0) {
2148                if (disk_state == D_DISKLESS)
2149                        /* even internal references gone, safe to destroy */
2150                        drbd_device_post_work(device, DESTROY_DISK);
2151                if (disk_state == D_FAILED)
2152                        /* all application IO references gone. */
2153                        if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2154                                drbd_device_post_work(device, GO_DISKLESS);
2155                wake_up(&device->misc_wait);
2156        }
2157}
2158
2159#ifndef __CHECKER__
2160static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2161{
2162        int io_allowed;
2163
2164        /* never get a reference while D_DISKLESS */
2165        if (device->state.disk == D_DISKLESS)
2166                return 0;
2167
2168        atomic_inc(&device->local_cnt);
2169        io_allowed = (device->state.disk >= mins);
2170        if (!io_allowed)
2171                put_ldev(device);
2172        return io_allowed;
2173}
2174#else
2175extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2176#endif
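
/* The canonical usage pattern for the reference helpers above (hypothetical
 * function, shown for illustration): take a local-disk reference, work with
 * device->ldev, drop the reference again. */
static inline sector_t drbd_example_md_first_sector(struct drbd_device *device)
{
        sector_t s = 0;

        if (get_ldev(device)) {
                s = drbd_md_first_sector(device->ldev);
                put_ldev(device);
        }
        return s;
}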
2177
2178/* this throttles on-the-fly application requests
2179 * according to max_buffers settings;
2180 * maybe re-implement using semaphores? */
2181static inline int drbd_get_max_buffers(struct drbd_device *device)
2182{
2183        struct net_conf *nc;
2184        int mxb;
2185
2186        rcu_read_lock();
2187        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2188        mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
2189        rcu_read_unlock();
2190
2191        return mxb;
2192}
2193
2194static inline int drbd_state_is_stable(struct drbd_device *device)
2195{
2196        union drbd_dev_state s = device->state;
2197
2198        /* DO NOT add a default clause, we want the compiler to warn us
2199         * for any newly introduced state we may have forgotten to add here */
2200
2201        switch ((enum drbd_conns)s.conn) {
2202        /* new io only accepted when there is no connection, ... */
2203        case C_STANDALONE:
2204        case C_WF_CONNECTION:
2205        /* ... or there is a well established connection. */
2206        case C_CONNECTED:
2207        case C_SYNC_SOURCE:
2208        case C_SYNC_TARGET:
2209        case C_VERIFY_S:
2210        case C_VERIFY_T:
2211        case C_PAUSED_SYNC_S:
2212        case C_PAUSED_SYNC_T:
2213        case C_AHEAD:
2214        case C_BEHIND:
2215                /* transitional states, IO allowed */
2216        case C_DISCONNECTING:
2217        case C_UNCONNECTED:
2218        case C_TIMEOUT:
2219        case C_BROKEN_PIPE:
2220        case C_NETWORK_FAILURE:
2221        case C_PROTOCOL_ERROR:
2222        case C_TEAR_DOWN:
2223        case C_WF_REPORT_PARAMS:
2224        case C_STARTING_SYNC_S:
2225        case C_STARTING_SYNC_T:
2226                break;
2227
2228                /* Allow IO in BM exchange states with new protocols */
2229        case C_WF_BITMAP_S:
2230                if (first_peer_device(device)->connection->agreed_pro_version < 96)
2231                        return 0;
2232                break;
2233
2234                /* no new io accepted in these states */
2235        case C_WF_BITMAP_T:
2236        case C_WF_SYNC_UUID:
2237        case C_MASK:
2238                /* not "stable" */
2239                return 0;
2240        }
2241
2242        switch ((enum drbd_disk_state)s.disk) {
2243        case D_DISKLESS:
2244        case D_INCONSISTENT:
2245        case D_OUTDATED:
2246        case D_CONSISTENT:
2247        case D_UP_TO_DATE:
2248        case D_FAILED:
2249                /* disk state is stable as well. */
2250                break;
2251
2252        /* no new io accepted during transitional states */
2253        case D_ATTACHING:
2254        case D_NEGOTIATING:
2255        case D_UNKNOWN:
2256        case D_MASK:
2257                /* not "stable" */
2258                return 0;
2259        }
2260
2261        return 1;
2262}
2263
2264static inline int drbd_suspended(struct drbd_device *device)
2265{
2266        struct drbd_resource *resource = device->resource;
2267
2268        return resource->susp || resource->susp_fen || resource->susp_nod;
2269}
2270
2271static inline bool may_inc_ap_bio(struct drbd_device *device)
2272{
2273        int mxb = drbd_get_max_buffers(device);
2274
2275        if (drbd_suspended(device))
2276                return false;
2277        if (atomic_read(&device->suspend_cnt))
2278                return false;
2279
2280        /* to avoid potential deadlock or bitmap corruption,
2281         * in various places, we only allow new application io
2282         * to start during "stable" states. */
2283
2284        /* no new io accepted when attaching or detaching the disk */
2285        if (!drbd_state_is_stable(device))
2286                return false;
2287
2288        /* since some older kernels don't have atomic_add_unless,
2289         * and we are within the spinlock anyway, we have this workaround.  */
2290        if (atomic_read(&device->ap_bio_cnt) > mxb)
2291                return false;
2292        if (test_bit(BITMAP_IO, &device->flags))
2293                return false;
2294        return true;
2295}
2296
2297static inline bool inc_ap_bio_cond(struct drbd_device *device)
2298{
2299        bool rv = false;
2300
2301        spin_lock_irq(&device->resource->req_lock);
2302        rv = may_inc_ap_bio(device);
2303        if (rv)
2304                atomic_inc(&device->ap_bio_cnt);
2305        spin_unlock_irq(&device->resource->req_lock);
2306
2307        return rv;
2308}
2309
2310static inline void inc_ap_bio(struct drbd_device *device)
2311{
2312        /* we wait here
2313         *    as long as the device is suspended,
2314         *    while the bitmap is still on the fly during the connection
2315         *    handshake, and as long as we would exceed the max_buffer limit.
2316         *
2317         * to avoid races with the reconnect code,
2318         * we need to atomic_inc within the spinlock. */
2319
2320        wait_event(device->misc_wait, inc_ap_bio_cond(device));
2321}
2322
2323static inline void dec_ap_bio(struct drbd_device *device)
2324{
2325        int mxb = drbd_get_max_buffers(device);
2326        int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2327
2328        D_ASSERT(device, ap_bio >= 0);
2329
2330        if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2331                if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2332                        drbd_queue_work(&first_peer_device(device)->
2333                                connection->sender_work,
2334                                &device->bm_io_work.w);
2335        }
2336
2337        /* this currently does wake_up for every dec_ap_bio!
2338         * maybe rather introduce some type of hysteresis?
2339         * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2340        if (ap_bio < mxb)
2341                wake_up(&device->misc_wait);
2342}
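
/* Sketch of the intended pairing of inc_ap_bio()/dec_ap_bio() above
 * (hypothetical; the real submit path lives in drbd_req.c): */
static inline void drbd_example_app_io(struct drbd_device *device)
{
        inc_ap_bio(device);     /* may block until new application IO is allowed */
        /* ... build and queue the drbd_request ... */
        dec_ap_bio(device);     /* done on completion in the real code */
}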
2343
2344static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2345{
2346        return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2347                first_peer_device(device)->connection->agreed_pro_version != 100;
2348}
2349
2350static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2351{
2352        int changed = device->ed_uuid != val;
2353        device->ed_uuid = val;
2354        return changed;
2355}
2356
2357static inline int drbd_queue_order_type(struct drbd_device *device)
2358{
2359        /* sorry, we currently have no working implementation
2360         * of distributed TCQ stuff */
2361#ifndef QUEUE_ORDERED_NONE
2362#define QUEUE_ORDERED_NONE 0
2363#endif
2364        return QUEUE_ORDERED_NONE;
2365}
2366
2367static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2368{
2369        return list_first_entry_or_null(&resource->connections,
2370                                struct drbd_connection, connections);
2371}
2372
2373#endif
2374