linux/drivers/block/drbd/drbd_actlog.c
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"


enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};
/* all fields on disk in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 bytes used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining bytes in the 4k block for
	 * context information.  A "flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit.
	 */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows covering device sizes of up to 2**54 bytes (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
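
/*
 * Layout sanity check, worked out from the size comments above (which
 * imply AL_UPDATES_PER_TRANSACTION == 64 and
 * AL_CONTEXT_PER_TRANSACTION == 919):
 *
 *   header:  3 * __be32 + 4 * __be16 + 4 * __be32 = 12 + 8 + 16 =   36 bytes
 *   updates: 64 * (__be16 + __be32)               = 64 * 6      =  384 bytes
 *   context: 919 * __be32                         = 919 * 4     = 3676 bytes
 *                                                          total: 4096 bytes,
 * i.e. exactly one aligned 4k meta-data block per transaction.
 */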

/**
 * drbd_md_get_buffer() - Claim the device's meta-data I/O page exclusively
 * @device:	DRBD device.
 * @intent:	short string identifying the caller, for diagnostics.
 *
 * Sleeps until the buffer can be claimed, or until the disk has failed.
 * Returns the address of the meta-data page, or NULL on disk failure.
 */
void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
{
	int r;

	wait_event(device->misc_wait,
		   (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
		   device->state.disk <= D_FAILED);

	if (r)
		return NULL;

	device->md_io.current_use = intent;
	device->md_io.start_jif = jiffies;
	device->md_io.submit_jif = device->md_io.start_jif - 1;
	return page_address(device->md_io.page);
}

void drbd_md_put_buffer(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->md_io.in_use))
		wake_up(&device->misc_wait);
}
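
/*
 * Typical usage pattern of the buffer get/put pair above (a sketch;
 * al_write_transaction() below is a real caller):
 *
 *	buffer = drbd_md_get_buffer(device, __func__);
 *	if (!buffer)
 *		return -ENODEV;
 *	... fill the 4k page, write it out via drbd_md_sync_page_io() ...
 *	drbd_md_put_buffer(device);
 */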

void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
				     unsigned int *done)
{
	long dt;

	rcu_read_lock();
	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
	rcu_read_unlock();
	dt = dt * HZ / 10; /* disk_timeout is configured in deciseconds */
	if (dt == 0)
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(device->misc_wait,
			*done || test_bit(FORCE_DETACH, &device->flags), dt);
	if (dt == 0) {
		drbd_err(device, "meta-data IO operation timed out\n");
		drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
	}
}

static int _drbd_md_sync_page_io(struct drbd_device *device,
				 struct drbd_backing_dev *bdev,
				 sector_t sector, int rw)
{
	struct bio *bio;
	/* we do all our meta data IO in aligned 4k blocks. */
	const int size = 4096;
	int err;

	device->md_io.done = 0;
	device->md_io.error = -ENODEV;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
		rw |= REQ_FUA | REQ_FLUSH;
	rw |= REQ_SYNC | REQ_NOIDLE;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_iter.bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
		goto out;
	bio->bi_private = device;
	bio->bi_end_io = drbd_md_endio;
	bio->bi_rw = rw;

	if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
		;
	else if (!get_ldev_if_state(device, D_ATTACHING)) {
		/* Corresponding put_ldev in drbd_md_endio() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
	device->md_io.submit_jif = jiffies;
	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
	if (bio_flagged(bio, BIO_UPTODATE))
		err = device->md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int err;
	D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
	     (void *)_RET_IP_);

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	err = _drbd_md_sync_page_io(device, bdev, sector, rw);
	if (err) {
		drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
	}
	return err;
}

static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *tmp;
	tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
			return bm_ext;
	}
	return NULL;
}

static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
	struct lc_element *al_ext;
	struct bm_extent *bm_ext;
	int wake;

	spin_lock_irq(&device->al_lock);
	bm_ext = find_active_resync_extent(device, enr);
	if (bm_ext) {
		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
		spin_unlock_irq(&device->al_lock);
		if (wake)
			wake_up(&device->al_wait);
		return NULL;
	}
	if (nonblock)
		al_ext = lc_try_get(device->act_log, enr);
	else
		al_ext = lc_get(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);
	return al_ext;
}

bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

	D_ASSERT(device, (unsigned)(last - first) <= 1);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
	if (first != last)
		return false;

	return _al_get(device, first, true);
}
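
/*
 * Worked example for the first/last math above, assuming
 * AL_EXTENT_SHIFT == 22 (4 MiB extents, see the struct comments):
 * AL_EXTENT_SHIFT - 9 == 13, i.e. 8192 sectors of 512 bytes per extent.
 * A 4 KiB request at sector 8191 spans sectors 8191..8198, so
 * first == 8191 >> 13 == 0 and last == 8198 >> 13 == 1: the request
 * touches two extents, and the fast path above refuses it.
 */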

bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	bool need_transaction = false;

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		wait_event(device->al_wait,
				(al_ext = _al_get(device, enr, false)) != NULL);
		if (al_ext->lc_number != enr)
			need_transaction = true;
	}
	return need_transaction;
}

static int al_write_transaction(struct drbd_device *device);

void drbd_al_begin_io_commit(struct drbd_device *device)
{
	bool locked = false;

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(device->al_wait,
			device->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(device->act_log)));

	if (locked) {
		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (device->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates)
				al_write_transaction(device);
			spin_lock_irq(&device->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(device->act_log);
			spin_unlock_irq(&device->al_lock);
		}
		lc_unlock(device->act_log);
		wake_up(&device->al_wait);
	}
}

/* Activate the activity log extents covering @i,
 * writing an AL transaction if that brought in any new extents. */
void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
	if (drbd_al_begin_io_prepare(device, i))
		drbd_al_begin_io_commit(device);
}

int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
	struct lru_cache *al = device->act_log;
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned nr_al_extents;
	unsigned available_update_slots;
	unsigned enr;

	D_ASSERT(device, first <= last);

	nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
	available_update_slots = min(al->nr_elements - al->used,
				al->max_pending_changes - al->pending_changes);

	/* We want all necessary updates for a given request within the same
	 * transaction.  We could first check how many updates are *actually*
	 * needed, and use that instead of the worst-case nr_al_extents */
	if (available_update_slots < nr_al_extents) {
		/* Too many activity log extents are currently "hot".
		 *
		 * If we have accumulated pending changes already,
		 * we made progress.
		 *
		 * If we cannot get even a single pending change through,
		 * stop the fast path until we made some progress,
		 * or requests to "cold" extents could be starved. */
		if (!al->pending_changes)
			__set_bit(__LC_STARVING, &device->act_log->flags);
		return -ENOBUFS;
	}

	/* Is resync active in this area? */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *tmp;
		tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
		if (unlikely(tmp != NULL)) {
			struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
				if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
					return -EBUSY;
				return -EWOULDBLOCK;
			}
		}
	}

	/* Check out the refcounts.
	 * Given that we checked for available elements and update slots above,
	 * this has to be successful. */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		al_ext = lc_get_cumulative(device->act_log, enr);
		if (!al_ext)
			drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
	}
	return 0;
}

void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(device, first <= last);
	spin_lock_irqsave(&device->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(device->act_log, enr);
		if (!extent) {
			drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(device->act_log, extent);
	}
	spin_unlock_irqrestore(&device->al_lock, flags);
	wake_up(&device->al_wait);
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
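
/*
 * Worked example, assuming PAGE_SHIFT == 12, AL_EXTENT_SHIFT == 22 and
 * BM_BLOCK_SHIFT == 12 (4 KiB bitmap granularity): one page holds
 * 4096 * 8 == 2^15 bits, and one AL extent corresponds to
 * 2^(22-12) == 1024 bits, so the function reduces to al_enr >> 5:
 * 32 AL extents' worth of bitmap land in each bitmap page.
 */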

static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
	const unsigned int stripes = device->ldev->md.al_stripes;
	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);

	/* ... to aligned 4k on disk block */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector in activity log */
	t *= 8;

	/* ... plus offset to the on disk position */
	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
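
/*
 * Worked example of the striping math above (values are illustrative):
 * with al_stripes == 4 and al_stripe_size_4k == 8, al_size_4k == 32.
 * Transaction number t == 5 maps to ((5 % 4) * 8) + 5/4 == 9, i.e. the
 * tenth 4k block, which starts 9 * 8 == 72 sectors into the AL area.
 * With a single stripe (al_stripes == 1) the mapping reduces to t itself.
 */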

static int al_write_transaction(struct drbd_device *device)
{
	struct al_transaction_on_disk *buffer;
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	if (!get_ldev(device)) {
		drbd_err(device, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(device->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (device->state.disk < D_INCONSISTENT) {
		drbd_err(device,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(device->state.disk));
		put_ldev(device);
		return -EIO;
	}

	/* protects md_io_buffer, al_tr_cycle, ... */
	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer) {
		drbd_err(device, "disk failed while waiting for md_io buffer\n");
		put_ldev(device);
		return -ENODEV;
	}

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(device->al_tr_number);

	i = 0;

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&device->al_lock);
	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(device,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&device->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   device->act_log->nr_elements - device->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = device->al_tr_cycle + i;
		extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (device->al_tr_cycle >= device->act_log->nr_elements)
		device->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(device);

	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	if (drbd_bm_write_hinted(device))
		err = -EIO;
	else {
		bool write_al_updates;
		rcu_read_lock();
		write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
		rcu_read_unlock();
		if (write_al_updates) {
			if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
				err = -EIO;
				drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
			} else {
				device->al_tr_number++;
				device->al_writ_cnt++;
			}
		}
	}

	drbd_md_put_buffer(device);
	put_ldev(device);

	return err;
}
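
/*
 * Context cycling, illustrated (a sketch; the al-extents value is
 * hypothetical): with AL_CONTEXT_PER_TRANSACTION == 919 context slots
 * per 4k block and, say, al-extents == 1237, a full snapshot of the
 * "hot" extent numbers spans ceil(1237/919) == 2 consecutive
 * transactions.  al_tr_cycle above advances the context window by 919
 * slots per transaction and wraps at nr_elements, so a steady stream of
 * updates also keeps the on-disk context complete and fresh.
 */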

static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(device->act_log, al_ext);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @device:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_device *device)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

	for (i = 0; i < device->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(device->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(device->al_wait, _try_lc_del(device, al_ext));
	}

	wake_up(&device->al_wait);
}
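
/*
 * Sketch of the locking protocol drbd_al_shrink() expects from its
 * caller (a hypothetical caller, following the comment above):
 *
 *	wait_event(device->al_wait, lc_try_lock(device->act_log));
 *	drbd_al_shrink(device);
 *	lc_unlock(device->act_log);
 *	wake_up(&device->al_wait);
 */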

int drbd_initialize_al(struct drbd_device *device, void *buffer)
{
	struct al_transaction_on_disk *al = buffer;
	struct drbd_md *md = &device->ldev->md;
	sector_t al_base = md->md_offset + md->al_offset;
	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
	int i;

	memset(al, 0, 4096);
	al->magic = cpu_to_be32(DRBD_AL_MAGIC);
	al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
	al->crc32c = cpu_to_be32(crc32c(0, al, 4096));

	for (i = 0; i < al_size_4k; i++) {
		int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE);
		if (err)
			return err;
	}
	return 0;
}
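
/*
 * Note on the stride above: each transaction block is 4096 bytes, i.e.
 * 8 sectors of 512 bytes, hence al_base + i * 8.  Writing an
 * AL_TR_INITIALIZED transaction into every one of the al_size_4k slots
 * invalidates any previously recorded transactions, leaving a cleanly
 * initialized on-disk activity log.
 */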

static const char *drbd_change_sync_fname[] = {
	[RECORD_RS_FAILED] = "drbd_rs_failed_io",
	[SET_IN_SYNC] = "drbd_set_in_sync",
	[SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};

/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * Adjusts the caching members ->rs_left (success) or ->rs_failed (!success),
 * potentially pulling this resync extent into the resync extent LRU cache
 * (and recounting the corresponding bits).
 *
 * Returns whether all bits have been cleared for this resync extent,
 * precisely: (rs_left <= rs_failed)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static bool update_rs_extent(struct drbd_device *device,
		unsigned int enr, int count,
		enum update_sync_bits_mode mode)
{
	struct lc_element *e;

	D_ASSERT(device, atomic_read(&device->local_cnt));

	/* When setting out-of-sync bits,
	 * we don't need it cached (lc_find).
	 * But if it is present in the cache,
	 * we should update the cached bit count.
	 * Otherwise, that extent should be in the resync extent lru cache
	 * already -- or we want to pull it in if necessary -- (lc_get),
	 * then update and check rs_left and rs_failed. */
	if (mode == SET_OUT_OF_SYNC)
		e = lc_find(device->resync, enr);
	else
		e = lc_get(device->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (mode == SET_IN_SYNC)
				ext->rs_left -= count;
			else if (mode == SET_OUT_OF_SYNC)
				ext->rs_left += count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				drbd_warn(device, "BAD! enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(device->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(device, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(device, enr);
			if (ext->flags != 0) {
				drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				drbd_warn(device, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(device->resync);
		}
		if (mode != SET_OUT_OF_SYNC)
			lc_put(device->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left <= ext->rs_failed) {
			ext->rs_failed = 0;
			return true;
		}
	} else if (mode != SET_OUT_OF_SYNC) {
		/* be quiet if lc_find() did not find it. */
		drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    device->resync_locked,
		    device->resync->nr_elements,
		    device->resync->flags);
	}
	return false;
}

void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = device->rs_mark_time[device->rs_last_mark];
	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
		    device->state.conn != C_PAUSED_SYNC_T &&
		    device->state.conn != C_PAUSED_SYNC_S) {
			device->rs_mark_time[next] = now;
			device->rs_mark_left[next] = still_to_go;
			device->rs_last_mark = next;
		}
	}
}

/* It is called a lazy update, so don't do the write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
	return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}

static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
	if (rs_done)
		set_bit(RS_DONE, &device->flags);
		/* and also set RS_PROGRESS below */
	else if (!lazy_bitmap_update_due(device))
		return;

	drbd_device_post_work(device, RS_PROGRESS);
}

static int update_sync_bits(struct drbd_device *device,
		unsigned long sbnr, unsigned long ebnr,
		enum update_sync_bits_mode mode)
{
	/*
	 * We keep a count of set bits per resync-extent in the ->rs_left
	 * caching member, so we need to loop and work within the resync extent
	 * alignment. Typically this loop will execute exactly once.
	 */
	unsigned long flags;
	unsigned long count = 0;
	unsigned int cleared = 0;
	while (sbnr <= ebnr) {
		/* set temporary boundary bit number to last bit number within
		 * the resync extent of the current start bit number,
		 * but cap at provided end bit number */
		unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
		unsigned long c;

		if (mode == RECORD_RS_FAILED)
			/* Only called from drbd_rs_failed_io(), bits
			 * supposedly still set.  Recount, maybe some
			 * of the bits have been successfully cleared
			 * by application IO meanwhile.
			 */
			c = drbd_bm_count_bits(device, sbnr, tbnr);
		else if (mode == SET_IN_SYNC)
			c = drbd_bm_clear_bits(device, sbnr, tbnr);
		else /* if (mode == SET_OUT_OF_SYNC) */
			c = drbd_bm_set_bits(device, sbnr, tbnr);

		if (c) {
			spin_lock_irqsave(&device->al_lock, flags);
			cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
			spin_unlock_irqrestore(&device->al_lock, flags);
			count += c;
		}
		sbnr = tbnr + 1;
	}
	if (count) {
		if (mode == SET_IN_SYNC) {
			unsigned long still_to_go = drbd_bm_total_weight(device);
			bool rs_is_done = (still_to_go <= device->rs_failed);
			drbd_advance_rs_marks(device, still_to_go);
			if (cleared || rs_is_done)
				maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
		} else if (mode == RECORD_RS_FAILED)
			device->rs_failed += count;
		wake_up(&device->al_wait);
	}
	return count;
}
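
/*
 * Example of the per-extent chunking above, assuming 4 KiB bitmap
 * granularity and 16 MiB resync extents (see the ATTENTION comment
 * further up): each resync extent covers 4096 bits, so
 * BM_BLOCKS_PER_BM_EXT_MASK == 0xfff.  For sbnr == 4000, ebnr == 5000,
 * the first pass handles bits 4000..4095 and the second 4096..5000;
 * each chunk stays within one resync extent for the rs_left accounting.
 */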

/* Clear the bits corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear bits of the affected
 * one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * Called by worker on C_SYNC_TARGET and receiver on SyncSource.
 */
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;

	/* This would be an empty REQ_FLUSH, be silent. */
	if ((mode == SET_OUT_OF_SYNC) && size == 0)
		return 0;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
		drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
				drbd_change_sync_fname[mode],
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(device))
		return 0; /* no disk, no metadata, no bitmap to manipulate bits in */

	nr_sectors = drbd_get_capacity(device->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	if (mode == SET_IN_SYNC) {
		/* Round up start sector, round down end sector.  We make sure
		 * we only clear full, aligned, BM_BLOCK_SIZE blocks. */
		if (unlikely(esector < BM_SECT_PER_BIT-1))
			goto out;
		if (unlikely(esector == (nr_sectors-1)))
			ebnr = lbnr;
		else
			ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
		sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
	} else {
		/* We set it out of sync, or record resync failure.
		 * Should not round anything here. */
		sbnr = BM_SECT_TO_BIT(sector);
		ebnr = BM_SECT_TO_BIT(esector);
	}

	count = update_sync_bits(device, sbnr, ebnr, mode);
out:
	put_ldev(device);
	return count;
}
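
/*
 * Rounding example for the SET_IN_SYNC case above, assuming
 * BM_SECT_PER_BIT == 8 (4 KiB bitmap granularity, 512-byte sectors):
 * a 4 KiB request at sector 9 covers sectors 9..16.  Rounding the start
 * up gives sbnr == BM_SECT_TO_BIT(9 + 7) == 2; rounding the end down
 * gives ebnr == BM_SECT_TO_BIT(16 - 7) == 1.  Since sbnr > ebnr, no
 * bitmap bit is fully covered and nothing is cleared; setting
 * out-of-sync would instead use the unrounded bits 1..2.
 */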

static
struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&device->al_lock);
	if (device->resync_locked > device->resync->nr_elements/2) {
		spin_unlock_irq(&device->al_lock);
		return NULL;
	}
	e = lc_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			device->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = device->resync->flags;
	spin_unlock_irq(&device->al_lock);
	if (wakeup)
		wake_up(&device->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			drbd_warn(device, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = lc_is_used(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	bool sa;

retry:
	sig = wait_event_interruptible(device->al_wait,
			(bm_ext = _bme_get(device, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	/* step aside only while we are above c-min-rate; unless disabled. */
	sa = drbd_rs_c_min_rate_throttle(device);

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(device->al_wait,
					       !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
					       (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));

		if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
			spin_lock_irq(&device->al_lock);
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				device->resync_locked--;
				wake_up(&device->al_wait);
			}
			spin_unlock_irq(&device->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;
	bool throttle = drbd_rs_should_slow_down(device, sector, true);

	/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
	 * not yet BME_LOCKED) extent needs to be kicked out explicitly.
	 * There is at most one such half-locked extent,
	 * which is remembered in resync_wenr. */

	if (throttle && device->resync_wenr != enr)
		return -EAGAIN;

	spin_lock_irq(&device->al_lock);
	if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer indefinitely if we give up the ref count
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(device->resync, device->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else {
			drbd_alert(device, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			device->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(device, bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (device->resync_locked > device->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(device->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = device->resync->flags;
			if (rs_flags & LC_STARVING)
				drbd_warn(device, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_LOCKED);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wake_up(&device->al_wait);
			D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(device, bm_ext->lce.refcnt == 1);
		device->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(device->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	return 0;

try_again:
	if (bm_ext) {
		if (throttle) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else
			device->resync_wenr = enr;
	}
	spin_unlock_irq(&device->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&device->al_lock, flags);
	e = lc_find(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(device->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		device->resync_locked--;
		wake_up(&device->al_wait);
	}

	spin_unlock_irqrestore(&device->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @device:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_device *device)
{
	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(device->resync);
		put_ldev(device);
	}
	device->resync_locked = 0;
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_device *device)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < device->resync->nr_elements; i++) {
			e = lc_element_by_index(device->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == device->resync_wenr) {
				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     device->resync_wenr);
				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				device->resync_wenr = LC_FREE;
				lc_put(device->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				drbd_info(device, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(device);
				spin_unlock_irq(&device->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(device->resync, &bm_ext->lce);
		}
		D_ASSERT(device, device->resync->used == 0);
		put_ldev(device);
	}
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);

	return 0;
}