/*
 * drbd_bitmap.c
 *
 * In-core and on-disk bitmap of out-of-sync blocks, part of the DRBD
 * replicated block device driver.
 */
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>

#include "drbd_int.h"
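
/*
 * The bitmap tracks, at BM_BLOCK_SIZE granularity, which blocks of the
 * device are out of sync with the peer.  It is kept as an array of
 * individually allocated kernel pages (bm_pages) rather than one large
 * allocation, so it can be resized and written out page by page.
 *
 * Locking: bm_lock (spinlock) protects the bitmap contents and counters;
 * bm_change (mutex) serializes whole-bitmap operations such as resize and
 * bulk I/O, see drbd_bm_lock()/drbd_bm_unlock().  Additional per-page
 * state is kept in page_private() of each bitmap page, see the
 * BM_PAGE_* bits below.
 */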
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	unsigned long bm_set;	/* number of currently set bits */
	unsigned long bm_bits;	/* bits covered by the current bitmap */
	size_t bm_words;	/* bm_bits rounded up to unsigned longs */
	size_t bm_number_of_pages;
	sector_t bm_dev_capacity;	/* device size this bitmap covers */
	struct mutex bm_change;	/* serializes resize operations */

	wait_queue_head_t bm_io_wait;	/* used to serialize IO of single pages */

	enum bm_flag bm_flags;

	/* debugging aid, in case we are still locked when we blow up ... */
	char *bm_why;
	struct task_struct *bm_task;
};

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
		drbd_task_to_thread_name(mdev->tconn, current),
		func, b->bm_why ?: "?",
		drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
			 drbd_task_to_thread_name(mdev->tconn, current),
			 why, b->bm_why ?: "?",
			 drbd_task_to_thread_name(mdev->tconn, b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}
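
/*
 * We store the page index of each bitmap page in the low 24 bits of its
 * page_private(), so the asynchronous I/O completion handler can map a
 * struct page back to its index in bm_pages.  The remaining high bits of
 * page_private() are used as per-page flag bits, defined below.
 */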
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark pages for lazy writeout once the syncer has cleared bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28
/* pages marked with this "HINT" will be considered for writeout
 * on activity log transactions */
#define BM_PAGE_HINT_WRITEOUT	27

/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as we may not have the bitmap lock. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}

/* As it is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&mdev->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
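
/**
 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
 * @mdev:	DRBD device.
 * @page_nr:	the bitmap page to mark with the "hint" flag
 *
 * From within an activity log transaction, we mark a few pages with these
 * hints, then call drbd_bm_write_hinted(), which will only write out changed
 * pages which are flagged with this mark.
 */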
void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
{
	struct page *page;
	if (page_nr >= mdev->bitmap->bm_number_of_pages) {
		dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
			 page_nr, (int)mdev->bitmap->bm_number_of_pages);
		return;
	}
	page = mdev->bitmap->bm_pages[page_nr];
	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}

static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}

static void bm_unmap(unsigned long *p_addr)
{
	return __bm_unmap(p_addr);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* word offset within its page: modulo longs per page */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* number of (long) words per page */
#define LWPP (PAGE_SIZE/sizeof(long))

static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
					  "a NULL pointer; i=%lu n=%lu\n",
					  i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

/* free the pointer array, which may have been allocated with either
 * kmalloc or vmalloc, see bm_realloc_pages() */
static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}
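
/*
 * "have" and "want" are NUMBER OF PAGES.
 */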
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_NOIO, as this is used while draining the device queue,
	 * and we don't want to deadlock waiting for our own writeback. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO);
	if (!new_pages) {
		new_pages = __vmalloc(bytes,
				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
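
/*
 * allocates the drbd_bitmap and stores it in mdev->bitmap.
 */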
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	if (!expect(mdev->bitmap))
		return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	if (!expect(mdev->bitmap))
		return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}
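
/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */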
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to fill
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
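
/* you better not modify the bitmap while this is running,
 * or its results will be stale */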
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}
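
/* offset and len in long words. */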
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
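
/* For the layout, see comment above drbd_md_set_sector_offsets(). */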
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
	u64 bitmap_sectors;
	if (ldev->md.al_offset == 8)
		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
	else
		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
	/* sectors to bits: *512 bytes/sector, *8 bits/byte */
	return bitmap_sectors << (9 + 3);
}
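
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */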
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
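
/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 */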
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync bits */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}
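
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */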
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only relevant for the last word
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
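
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */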
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}
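
/* set all bits in the bitmap */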
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}
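
/* clear all bits in the bitmap */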
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_WRITE_ALL_PAGES	4
	int error;
	struct kref kref;
};

static void bm_aio_ctx_destroy(struct kref *kref)
{
	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);

	put_ldev(ctx->mdev);
	kfree(ctx);
}
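
/* bv_page may be a copy, or may be the original bitmap page. */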
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}

static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}
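
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */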
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	if (!ctx->flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if ((flags & BM_AIO_WRITE_HINTED) &&
			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
				    &page_private(b->bm_pages[i])))
				continue;

			if (!(flags & BM_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0)
		dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			 rw == WRITE ? "WRITE" : "READ",
			 count, jiffies - now);

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk timeout/force-detach during IO... */

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if (flags == 0)
		dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
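
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */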
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0, 0);
}
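
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */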
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0, 0);
}
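
/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will write all pages.
 */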
int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
}
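
/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */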
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
}
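
/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */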
int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
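
/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @mdev:	DRBD device.
 */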
int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
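
/**
 * drbd_bm_write_page() - Writes a page from the bitmap to its on disk location.
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 */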
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	int err;

	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = BM_AIO_COPY_PAGES,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
		kfree(ctx);
		return -ENODEV;
	}

	bm_page_io_async(ctx, idx, WRITE_SYNC);
	wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);

	if (ctx->error)
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		/* that causes us to detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
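
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */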
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	if (!expect(b))
		return i;
	if (!expect(b->bm_pages))
		return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return __bm_find_next(mdev, bm_fo, 0);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return __bm_find_next(mdev, bm_fo, 1);
}
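
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG)
 * Must hold bitmap lock already. */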
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
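
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */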
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
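
/* returns number of bits changed 0 -> 1 */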
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}
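
/* returns number of bits changed 1 -> 0 */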
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}
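
/* sets all bits in full words,
 * from first_word up to, but not including, last_word */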
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
	if (changed) {
		/* We only need lazy writeout, the information is still in the
		 * remote bitmap as well, and is reconstructed during the next
		 * bitmap exchange, if lost locally due to a crash. */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}
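
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */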
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyway.
	 */
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(mdev, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: last word per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
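
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */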
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}
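
/* returns number of bits set in the range [s, e] */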
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
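
/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * typical usage only cares whether the result is zero, or the extent is
 * otherwise "locked" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 */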
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}