/*
 * drbd_actlog.c
 *
 * This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"


enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};

/* all fields on disk stored in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 byte used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining byte in the 4k block for
	 * context information.  "Flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit.
	 */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows to cover device sizes of up to 2**54 Byte (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */
	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
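
/* Illustrative sanity check, not part of the original source: with the
 * default AL_UPDATES_PER_TRANSACTION == 64 and AL_CONTEXT_PER_TRANSACTION
 * == 919, the 36 byte header plus 64 * (2 + 4) bytes of update slots plus
 * 919 * 4 bytes of context add up to exactly one 4 KiB metadata block:
 *
 *	BUILD_BUG_ON(sizeof(struct al_transaction_on_disk) != 4096);
 */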

/* Get exclusive access to the one per-device 4k meta-data I/O buffer page.
 * Returns a pointer to the buffer, or NULL if the disk failed meanwhile. */
void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
{
	int r;

	wait_event(device->misc_wait,
		   (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
		   device->state.disk <= D_FAILED);

	if (r)
		return NULL;

	device->md_io.current_use = intent;
	device->md_io.start_jif = jiffies;
	device->md_io.submit_jif = device->md_io.start_jif - 1;
	return page_address(device->md_io.page);
}

void drbd_md_put_buffer(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->md_io.in_use))
		wake_up(&device->misc_wait);
}
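
/* Typical usage, a sketch only (see al_write_transaction() below for a
 * real caller):
 *
 *	buffer = drbd_md_get_buffer(device, __func__);
 *	if (buffer) {
 *		... fill the 4k buffer, write it out synchronously
 *		    via drbd_md_sync_page_io() ...
 *		drbd_md_put_buffer(device);
 *	}
 */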

void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
				       unsigned int *done)
{
	long dt;

	rcu_read_lock();
	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
	rcu_read_unlock();
	/* disk_timeout is configured in units of 0.1 seconds;
	 * convert to jiffies, 0 means "no timeout". */
	dt = dt * HZ / 10;
	if (dt == 0)
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(device->misc_wait,
			*done || test_bit(FORCE_DETACH, &device->flags), dt);
	if (dt == 0) {
		drbd_err(device, "meta-data IO operation timed out\n");
		drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
	}
}

static int _drbd_md_sync_page_io(struct drbd_device *device,
				 struct drbd_backing_dev *bdev,
				 sector_t sector, int rw)
{
	struct bio *bio;
	/* we do all our meta data IO in aligned 4k blocks */
	const int size = 4096;
	int err;

	device->md_io.done = 0;
	device->md_io.error = -ENODEV;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
		rw |= REQ_FUA | REQ_FLUSH;
	rw |= REQ_SYNC | REQ_NOIDLE;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_iter.bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
		goto out;
	bio->bi_private = device;
	bio->bi_end_io = drbd_md_endio;
	bio->bi_rw = rw;

	if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
		;
	else if (!get_ldev_if_state(device, D_ATTACHING)) {
		/* Corresponding put_ldev in drbd_md_endio() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
	device->md_io.submit_jif = jiffies;
	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
	if (bio_flagged(bio, BIO_UPTODATE))
		err = device->md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int err;
	D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
	     (void*)_RET_IP_ );

	/* one aligned 4k block spans 8 sectors of 512 bytes, hence sector + 7 */
	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	err = _drbd_md_sync_page_io(device, bdev, sector, rw);
	if (err) {
		drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
	}
	return err;
}

static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *tmp;
	/* @enr is an activity log extent number; the resync LRU is indexed
	 * by the coarser resync extent number. */
	tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
			return bm_ext;
	}
	return NULL;
}

static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
	struct lc_element *al_ext;
	struct bm_extent *bm_ext;
	int wake;

	spin_lock_irq(&device->al_lock);
	bm_ext = find_active_resync_extent(device, enr);
	if (bm_ext) {
		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
		spin_unlock_irq(&device->al_lock);
		if (wake)
			wake_up(&device->al_wait);
		return NULL;
	}
	if (nonblock)
		al_ext = lc_try_get(device->act_log, enr);
	else
		al_ext = lc_get(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);
	return al_ext;
}

bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

	D_ASSERT(device, (unsigned)(last - first) <= 1);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
	if (first != last)
		return false;

	return _al_get(device, first, true);
}
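
/* Worked example, assuming the usual AL_EXTENT_SHIFT of 22 (4 MiB extents):
 * shifting a 512-byte sector number right by AL_EXTENT_SHIFT-9 == 13 maps it
 * to its activity log extent, e.g. i->sector == 16384 (byte offset 8 MiB)
 * gives extent number 2, and a 4 KiB request starting there stays within it. */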

bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	bool need_transaction = false;

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		wait_event(device->al_wait,
				(al_ext = _al_get(device, enr, false)) != NULL);
		/* a not-yet-committed lc_number means this extent was not
		 * active before; its activation needs an AL transaction */
		if (al_ext->lc_number != enr)
			need_transaction = true;
	}
	return need_transaction;
}

static int al_write_transaction(struct drbd_device *device);

void drbd_al_begin_io_commit(struct drbd_device *device)
{
	bool locked = false;

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(device->al_wait,
			device->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(device->act_log)));

	if (locked) {
		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (device->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates)
				al_write_transaction(device);
			spin_lock_irq(&device->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(device->act_log);
			spin_unlock_irq(&device->al_lock);
		}
		lc_unlock(device->act_log);
		wake_up(&device->al_wait);
	}
}

/* Activate all activity log extents covered by @i and, if that added new
 * extents to the in-memory activity log, commit them to the on-disk AL. */
void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
	if (drbd_al_begin_io_prepare(device, i))
		drbd_al_begin_io_commit(device);
}

int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
	struct lru_cache *al = device->act_log;
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned nr_al_extents;
	unsigned available_update_slots;
	unsigned enr;

	D_ASSERT(device, first <= last);

	nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
	available_update_slots = min(al->nr_elements - al->used,
				al->max_pending_changes - al->pending_changes);

	/* We want all necessary updates for a given request within the same transaction
	 * We could first check how many updates are *actually* needed,
	 * and use that instead of the worst-case nr_al_extents */
	if (available_update_slots < nr_al_extents) {
		/* Too many activity log extents are currently "hot".
		 *
		 * If we have accumulated pending changes already,
		 * we made progress.
		 *
		 * If we cannot get even a single pending change through,
		 * stop the fast path until we made some progress,
		 * or requests to "cold" extents could be starved. */
		if (!al->pending_changes)
			__set_bit(__LC_STARVING, &device->act_log->flags);
		return -ENOBUFS;
	}

	/* Is resync active in this area? */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *tmp;
		tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
		if (unlikely(tmp != NULL)) {
			struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
				if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
					return -EBUSY;
				return -EWOULDBLOCK;
			}
		}
	}

	/* Checkout the refcounts.
	 * Given that we checked for available elements and update slots above,
	 * this has to be successful. */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		al_ext = lc_get_cumulative(device->act_log, enr);
		if (!al_ext)
			drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
	}
	return 0;
}

void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(device, first <= last);
	spin_lock_irqsave(&device->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(device->act_log, enr);
		if (!extent) {
			drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(device->act_log, extent);
	}
	spin_unlock_irqrestore(&device->al_lock, flags);
	wake_up(&device->al_wait);
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
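
/* Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12), 4 MiB AL extents
 * (AL_EXTENT_SHIFT == 22) and 4 KiB bitmap granularity (BM_BLOCK_SHIFT == 12):
 * one bitmap page holds PAGE_SIZE * 8 == 32768 bits covering 128 MiB, so the
 * shift is (12 + 3) - (22 - 12) == 5, i.e. 32 AL extents share a bitmap page. */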

static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
	const unsigned int stripes = device->ldev->md.al_stripes;
	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);

	/* ... to aligned 4k on disk block, interleaved over the stripes */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector offset (8 sectors per 4k block) */
	t *= 8;

	/* ... plus offset to the on disk position */
	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
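
/* Worked example (illustrative): with al_stripes == 4, al_stripe_size_4k == 8
 * and thus al_size_4k == 32, transaction number 5 maps to
 * t = (5 % 4) * 8 + 5 / 4 == 9, i.e. 9 * 8 == 72 sectors past the start of
 * the on-disk activity log; consecutive transactions rotate across stripes. */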

int al_write_transaction(struct drbd_device *device)
{
	struct al_transaction_on_disk *buffer;
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	if (!get_ldev(device)) {
		drbd_err(device, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(device->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (device->state.disk < D_INCONSISTENT) {
		drbd_err(device,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(device->state.disk));
		put_ldev(device);
		return -EIO;
	}

	/* protects md_io_buffer, al_tr_cycle, ... */
	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer) {
		drbd_err(device, "disk failed while waiting for md_io buffer\n");
		put_ldev(device);
		return -ENODEV;
	}

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(device->al_tr_number);

	i = 0;

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&device->al_lock);
	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(device,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&device->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   device->act_log->nr_elements - device->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = device->al_tr_cycle + i;
		extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (device->al_tr_cycle >= device->act_log->nr_elements)
		device->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(device);

	/* the checksum covers the full 4k block,
	 * with the crc32c field itself still zero (see struct above) */
	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	if (drbd_bm_write_hinted(device))
		err = -EIO;
	else {
		bool write_al_updates;
		rcu_read_lock();
		write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
		rcu_read_unlock();
		if (write_al_updates) {
			if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
				err = -EIO;
				drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
			} else {
				device->al_tr_number++;
				device->al_writ_cnt++;
			}
		}
	}

	drbd_md_put_buffer(device);
	put_ldev(device);

	return err;
}
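
/* Sketch only, not part of this file: a reader of the on-disk activity log
 * can verify a transaction block by treating the stored checksum as zero
 * while recomputing over the whole 4k block:
 *
 *	u32 want = be32_to_cpu(b->crc32c);
 *	b->crc32c = 0;
 *	bool valid = crc32c(0, b, 4096) == want &&
 *		     be32_to_cpu(b->magic) == DRBD_AL_MAGIC;
 */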

static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(device->act_log, al_ext);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @device:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_device *device)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

	for (i = 0; i < device->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(device->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(device->al_wait, _try_lc_del(device, al_ext));
	}

	wake_up(&device->al_wait);
}

/* Initialize the on-disk activity log: write an empty (AL_TR_INITIALIZED)
 * transaction into every 4k block of the AL ring buffer. */
int drbd_initialize_al(struct drbd_device *device, void *buffer)
{
	struct al_transaction_on_disk *al = buffer;
	struct drbd_md *md = &device->ldev->md;
	sector_t al_base = md->md_offset + md->al_offset;
	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
	int i;

	memset(al, 0, 4096);
	al->magic = cpu_to_be32(DRBD_AL_MAGIC);
	al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
	al->crc32c = cpu_to_be32(crc32c(0, al, 4096));

	for (i = 0; i < al_size_4k; i++) {
		/* each 4k transaction block covers 8 sectors of 512 bytes */
		int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE);
		if (err)
			return err;
	}
	return 0;
}
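
/* Illustrative numbers, assuming the historical default AL geometry of
 * al_stripes == 1 and al_stripe_size_4k == 8: the activity log occupies
 * 8 * 4 KiB == 32 KiB of meta data, so the loop above writes 8 blocks. */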

static const char *drbd_change_sync_fname[] = {
	[RECORD_RS_FAILED] = "drbd_rs_failed_io",
	[SET_IN_SYNC] = "drbd_set_in_sync",
	[SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};

/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * Adjusts the caching members ->rs_left (success) or ->rs_failed (failure)
 * potentially pulling in (and recounting the corresponding bits)
 * this resync extent into the resync extent lru cache.
 *
 * Returns whether all bits have been cleared for this resync extent,
 * precisely: (rs_left <= rs_failed)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static bool update_rs_extent(struct drbd_device *device,
		unsigned int enr, int count,
		enum update_sync_bits_mode mode)
{
	struct lc_element *e;

	D_ASSERT(device, atomic_read(&device->local_cnt));

	/* When setting out-of-sync bits,
	 * we don't need it cached (lc_find).
	 * But if it is present in the cache,
	 * we should update the cached bit count.
	 * Otherwise, that extent should be in the resync extent lru cache
	 * already -- or we want to pull it in if necessary -- (lc_get),
	 * then update and check rs_left and rs_failed. */
	if (mode == SET_OUT_OF_SYNC)
		e = lc_find(device->resync, enr);
	else
		e = lc_get(device->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (mode == SET_IN_SYNC)
				ext->rs_left -= count;
			else if (mode == SET_OUT_OF_SYNC)
				ext->rs_left += count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				drbd_warn(device, "BAD! enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(device->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(device, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(device, enr);
			if (ext->flags != 0) {
				drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				drbd_warn(device, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(device->resync);
		}
		if (mode != SET_OUT_OF_SYNC)
			lc_put(device->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left <= ext->rs_failed) {
			ext->rs_failed = 0;
			return true;
		}
	} else if (mode != SET_OUT_OF_SYNC) {
		/* be quiet if lc_find() did not find it. */
		drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    device->resync_locked,
		    device->resync->nr_elements,
		    device->resync->flags);
	}
	return false;
}

void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
	/* Keep a ring of DRBD_SYNC_MARKS (time, bits-left) samples,
	 * advanced at most every DRBD_SYNC_MARK_STEP, to estimate the
	 * resync speed; paused resyncs do not advance the marks. */
	unsigned long now = jiffies;
	unsigned long last = device->rs_mark_time[device->rs_last_mark];
	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
		    device->state.conn != C_PAUSED_SYNC_T &&
		    device->state.conn != C_PAUSED_SYNC_S) {
			device->rs_mark_time[next] = now;
			device->rs_mark_left[next] = still_to_go;
			device->rs_last_mark = next;
		}
	}
}

/* It is called lazy update, so don't do write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
	return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}

static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
	if (rs_done)
		set_bit(RS_DONE, &device->flags);
		/* and also set RS_PROGRESS below */
	else if (!lazy_bitmap_update_due(device))
		return;

	drbd_device_post_work(device, RS_PROGRESS);
}

static int update_sync_bits(struct drbd_device *device,
		unsigned long sbnr, unsigned long ebnr,
		enum update_sync_bits_mode mode)
{
	/*
	 * We keep a count of set bits per resync-extent in the ->rs_left
	 * caching member, so we need to loop and work within the resync extent
	 * alignment. Typically this loop will execute exactly once.
	 */
	unsigned long flags;
	unsigned long count = 0;
	unsigned int cleared = 0;
	while (sbnr <= ebnr) {
		/* set temporary boundary bit number to last bit number within
		 * the resync extent of the current start bit number,
		 * but cap at provided end bit number */
		unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
		unsigned long c;

		if (mode == RECORD_RS_FAILED)
			/* Only called from drbd_rs_failed_io(), bits
			 * supposedly still set.  Recount, maybe some
			 * of the bits have been successfully cleared
			 * by application IO meanwhile.
			 */
			c = drbd_bm_count_bits(device, sbnr, tbnr);
		else if (mode == SET_IN_SYNC)
			c = drbd_bm_clear_bits(device, sbnr, tbnr);
		else /* SET_OUT_OF_SYNC */
			c = drbd_bm_set_bits(device, sbnr, tbnr);

		if (c) {
			spin_lock_irqsave(&device->al_lock, flags);
			cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
			spin_unlock_irqrestore(&device->al_lock, flags);
			count += c;
		}
		sbnr = tbnr + 1;
	}
	if (count) {
		if (mode == SET_IN_SYNC) {
			unsigned long still_to_go = drbd_bm_total_weight(device);
			bool rs_is_done = (still_to_go <= device->rs_failed);
			drbd_advance_rs_marks(device, still_to_go);
			if (cleared || rs_is_done)
				maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
		} else if (mode == RECORD_RS_FAILED)
			device->rs_failed += count;
		wake_up(&device->al_wait);
	}
	return count;
}
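
/* Worked example, assuming 16 MiB resync extents and 4 KiB bitmap blocks:
 * each resync extent spans 4096 bitmap bits, so BM_BLOCKS_PER_BM_EXT_MASK
 * is 0xfff and "sbnr | BM_BLOCKS_PER_BM_EXT_MASK" is the last bit of the
 * extent containing sbnr.  A range crossing one extent boundary makes the
 * loop above run twice, once per affected resync extent. */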

/* Clear the bits corresponding to the piece of storage in question:
 * size bytes of data starting at sector.  Only clear bits of the affected
 * one or more _aligned_ BM_BLOCK_SIZE (4K) blocks.
 *
 * Called by worker on C_SYNC_TARGET and receiver on SyncSource.
 */
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;

	/* This would be an empty REQ_FLUSH, be gentle */
	if ((mode == SET_OUT_OF_SYNC) && size == 0)
		return 0;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
		drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
				drbd_change_sync_fname[mode],
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(device))
		return 0; /* no disk, no metadata, no bitmap to manipulate bits in */

	nr_sectors = drbd_get_capacity(device->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	if (mode == SET_IN_SYNC) {
		/* Round up start sector, round down end sector.  We make sure
		 * we only clear full, aligned, BM_BLOCK_SIZE (4K) blocks. */
		if (unlikely(esector < BM_SECT_PER_BIT-1))
			goto out;
		if (unlikely(esector == (nr_sectors-1)))
			ebnr = lbnr;
		else
			ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
		sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
	} else {
		/* We set it out of sync, or record resync failure.
		 * Should not round anything here. */
		sbnr = BM_SECT_TO_BIT(sector);
		ebnr = BM_SECT_TO_BIT(esector);
	}

	count = update_sync_bits(device, sbnr, ebnr, mode);
out:
	put_ldev(device);
	return count;
}
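
/* Rounding example, assuming BM_SECT_PER_BIT == 8 (4 KiB per bitmap bit):
 * for sector == 1, size == 4096 (esector == 8), SET_IN_SYNC rounds inward to
 * sbnr == 1, ebnr == 0, so no bit is cleared (no full 4K block is covered),
 * while SET_OUT_OF_SYNC rounds outward to sbnr == 0, ebnr == 1 and marks
 * both partially covered blocks out of sync. */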

static
struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&device->al_lock);
	/* never lock more than half of the resync LRU for resync I/O */
	if (device->resync_locked > device->resync->nr_elements/2) {
		spin_unlock_irq(&device->al_lock);
		return NULL;
	}
	e = lc_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			device->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = device->resync->flags;
	spin_unlock_irq(&device->al_lock);
	if (wakeup)
		wake_up(&device->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			drbd_warn(device, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = lc_is_used(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	bool sa;

retry:
	sig = wait_event_interruptible(device->al_wait,
			(bm_ext = _bme_get(device, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	/* step aside only while we are above c-min-rate; unless disabled. */
	sa = drbd_rs_c_min_rate_throttle(device);

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(device->al_wait,
					       !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
					       (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));

		if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
			spin_lock_irq(&device->al_lock);
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				device->resync_locked--;
				wake_up(&device->al_wait);
			}
			spin_unlock_irq(&device->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;
	bool throttle = drbd_rs_should_slow_down(device, sector, true);

	/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
	 * not yet BME_LOCKED) extent needs to be kicked out explicitly if we
	 * need to throttle. There is at most one such half-locked extent,
	 * which is remembered in resync_wenr. */

	if (throttle && device->resync_wenr != enr)
		return -EAGAIN;

	spin_lock_irq(&device->al_lock);
	if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer undefined if we give up the ref count
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(device->resync, device->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else {
			drbd_alert(device, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			device->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(device, bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (device->resync_locked > device->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(device->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = device->resync->flags;
			if (rs_flags & LC_STARVING)
				drbd_warn(device, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_LOCKED);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wake_up(&device->al_wait);
			D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(device, bm_ext->lce.refcnt == 1);
		device->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(device->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	return 0;

try_again:
	if (bm_ext) {
		if (throttle) {
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0;
				device->resync_locked--;
			}
			wake_up(&device->al_wait);
		} else
			device->resync_wenr = enr;
	}
	spin_unlock_irq(&device->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&device->al_lock, flags);
	e = lc_find(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(device->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		device->resync_locked--;
		wake_up(&device->al_wait);
	}

	spin_unlock_irqrestore(&device->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @device:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_device *device)
{
	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(device->resync);
		put_ldev(device);
	}
	device->resync_locked = 0;
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_device *device)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < device->resync->nr_elements; i++) {
			e = lc_element_by_index(device->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == device->resync_wenr) {
				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     device->resync_wenr);
				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				device->resync_wenr = LC_FREE;
				lc_put(device->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				drbd_info(device, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(device);
				spin_unlock_irq(&device->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(device->resync, &bm_ext->lce);
		}
		D_ASSERT(device, device->resync->used == 0);
		put_ldev(device);
	}
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);

	return 0;
}