/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
*/
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include "drbd_int.h"

/* OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 *
 * The bitmap tracks which blocks of the backing device are out of sync
 * with the peer: one bit per BM_BLOCK_SIZE (4 KiB) of storage, a set bit
 * meaning "needs to be resynced".
 *
 * It is kept as an array of individually allocated pages (bm_pages),
 * mapped on demand with kmap_atomic(), so the bitmap of a very large
 * device does not need one huge physically contiguous allocation.
 * The bitmap is stored little endian on disk, and kept little endian in
 * core memory as well, so that 32bit and 64bit, little and big endian
 * peers interoperate.
 *
 * Access serialization:
 *  - bm_lock (spinlock) protects the bit operations and counters,
 *  - bm_change (mutex) serializes whole-bitmap operations such as
 *    resize and bulk IO (see drbd_bm_lock()/drbd_bm_unlock()),
 *  - per-page IO is serialized via a bit in page->private together
 *    with the bm_io_wait wait queue.
 */

struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	/* exclusively to be used by __al_write_transaction(),
	 * drbd_bm_mark_for_writeout() and
	 * drbd_bm_write_hinted() -> bm_rw() called from there.
	 */
	unsigned int n_bitmap_hints;
	unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];

	unsigned long bm_set;		/* number of currently set bits */
	unsigned long bm_bits;		/* size of the bitmap, in bits */
	size_t   bm_words;		/* size of the bitmap, in longs */
	size_t   bm_number_of_pages;
	sector_t bm_dev_capacity;	/* device size covered, in 512 byte sectors */
	struct mutex bm_change;		/* serializes resize operations */

	wait_queue_head_t bm_io_wait;	/* used to serialize IO of single pages */

	enum bm_flag bm_flags;

	/* debugging aid, in case we are still locked when we blow up ... */
	char		  *bm_why;
	struct task_struct *bm_task;
};
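
/* A rough sizing sketch (illustration only; numbers assume the usual
 * BM_BLOCK_SHIFT == 12, i.e. 4 KiB resync granularity, and 4 KiB pages):
 * a 1 TiB backing device needs
 *	bm_bits  = 2^40 / 2^12 = 2^28 bits
 *	         = 2^25 bytes (32 MiB) of bitmap
 *	         = 8192 bitmap pages in bm_pages,
 * and each bitmap page covers 32768 bits, i.e. 128 MiB of device.
 */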

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
		 current->comm, task_pid_nr(current),
		 func, b->bm_why ?: "?",
		 b->bm_task->comm, task_pid_nr(b->bm_task));
}

void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = device->bitmap;
	int trylock_failed;

	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
			  current->comm, task_pid_nr(current),
			  why, b->bm_why ?: "?",
			  b->bm_task->comm, task_pid_nr(b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & b->bm_flags))
		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}

/* we store some "meta" info about our pages in page->private */
/* the low 24 bits are used as the page index; that bounds the bitmap at
 * 1<<24 pages, which with 4 KiB pages covers petabytes of storage,
 * plenty of headroom. The remaining high bits are used as flags. */
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28
/* pages marked with this "HINT" will be considered for writeout
 * on activity log transactions */
#define BM_PAGE_HINT_WRITEOUT	27

/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as bitmap changes may happen from various
 * contexts concurrently. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}
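
/* Layout sketch of page->private (illustration only):
 *
 *   bit  31 30 29 28 27 . . . 24 23 ................... 0
 *        |  |  |  |  |           |<---- page index ---->|
 *        |  |  |  |  +- HINT_WRITEOUT
 *        |  |  |  +---- LAZY_WRITEOUT
 *        |  |  +------- NEED_WRITEOUT
 *        |  +---------- IO_ERROR
 *        +------------- IO_LOCK
 *
 * e.g. a page with index 5 that is dirty and currently under IO has
 * page_private(page) ==
 *	(1UL << BM_PAGE_IO_LOCK) | (1UL << BM_PAGE_NEED_WRITEOUT) | 5.
 */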

/* As it is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&device->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}

void drbd_bm_reset_al_hints(struct drbd_device *device)
{
	device->bitmap->n_bitmap_hints = 0;
}

/**
 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
 * @device:	DRBD device.
 * @page_nr:	the bitmap page to mark with the "hint" flag
 *
 * From within an activity log transaction, we mark a few pages with these
 * hints, then call drbd_bm_write_hinted(), which will only write out changed
 * pages which are flagged with this mark.
 */
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	if (page_nr >= device->bitmap->bm_number_of_pages) {
		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
			  page_nr, (int)device->bitmap->bm_number_of_pages);
		return;
	}
	page = device->bitmap->bm_pages[page_nr];
	BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
	if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
		b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
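
/* Worked example (illustration only): with 4 KiB pages and 64bit longs,
 * PAGE_SHIFT + 3 == 15, so each page holds 32768 bits, and e.g.
 *	bm_bit_to_page_idx(b, 100000)  == 100000 >> 15 == 3;
 * PAGE_SHIFT - LN2_BPL + 3 == 9, so each page holds 512 long words, and
 *	bm_word_to_page_idx(b, 1000)   == 1000 >> 9    == 1.
 */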

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}

static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}

static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr);
}

/* word offset from the start of the bitmap to the word containing the
 * first bit of the bitmap extent with number @s */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* word offset modulo longs per page; gcc may not optimize a plain %
 * into the masking, so spell it out explicitly */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
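
/* Plugging in the usual constants (illustration only): with
 * BM_EXT_SHIFT == 24 (16 MiB bitmap extents), BM_BLOCK_SHIFT == 12 and
 * LN2_BPL == 6 (64bit longs), S2W(s) == s << 6, i.e. one bitmap extent
 * spans 64 long words; with 4 KiB pages, LWPP == 512. */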

static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
				 i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static inline void bm_vk_free(void *ptr)
{
	kvfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_NOIO, as this is called while drbd IO is "suspended",
	 * and during resize or attach on diskless Primary,
	 * we must not block on IO to ourselves. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes,
				GFP_NOIO | __GFP_ZERO,
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
	}

	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	return new_pages;
}

/*
 * allocates the drbd_bitmap and stores it in device->bitmap.
 */
int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	device->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return 0;
	return device->bitmap->bm_dev_capacity;
}

/* called on driver unload, or on device destruction:
 * free all pages and the bitmap structure itself. */
void drbd_bm_cleanup(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return;
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
	bm_vk_free(device->bitmap->bm_pages);
	kfree(device->bitmap);
	device->bitmap = NULL;
}

#ifndef BITS_PER_PAGE
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#else
# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
#  error "ambiguous BITS_PER_PAGE"
# endif
#endif
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)

/* clear all bits in the bitmap beyond bm_bits:
 * the unused tail of the last word, and, on 32bit hosts, possibly a
 * padding long, to stay 64bit aligned with the on-disk representation.
 * Returns the number of bits cleared. */
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
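
/* Masking example (illustration only, 64bit longs): for bm_bits == 100,
 * tmp == 100, the last valid bit lives in word 1 of the last page, and
 *	mask = (1UL << (100 & 63)) - 1 = (1UL << 36) - 1,
 * so the upper 28 bits of that word are cleared and counted. */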

/* set all bits beyond bm_bits (the mirror image of bm_clear_surplus) */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, bm points to the long containing the last
		 * bit; set everything above that bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, also fill the padding long */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* returns the number of bits set in the whole bitmap.
 * Only used while the bitmap is locked against changes
 * (read-in, resize), so no spinlock is taken here. */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) - 1;
	int idx, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}

/* offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		pr_alert("bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/* how much of the on-disk meta-data area is usable for the bitmap?
 * The bitmap starts at bm_offset; it is bounded either by the activity
 * log (al_offset) or by the end of the meta-data area (md_size_sect).
 * Convert sectors to bits: << 9 for bytes, << 3 for bits per byte. */
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
	u64 bitmap_sectors;
	if (ldev->md.al_offset == 8)
		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
	else
		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
	return bitmap_sectors << (9 + 3);
}
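
/* Capacity sketch (illustration only): if 4096 sectors of meta-data are
 * left for the bitmap, that is 4096 << (9 + 3) == 2^24 on-disk bits,
 * which at 4 KiB per bit covers a 64 GiB backing device. */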

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages;
	struct page **npages, **opages = NULL;
	int err = 0;
	bool growing;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}
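
/* Resize arithmetic, worked through (illustration only; 64bit host,
 * 4 KiB pages, BM_SECT_PER_BIT == 8): for capacity == 2097152 sectors
 * (1 GiB),
 *	bits  = 2097152 / 8                 = 262144,
 *	words = ALIGN(262144, 64) >> 6      = 4096,
 *	want  = ALIGN(4096 * 8, 4096) >> 12 = 8 pages.
 */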

/* inherently racy:
 * if not protected by other means, the return value may be out of date
 * as soon as this function returns.
 * We still need to take the spinlock, since it is important that this
 * returns bm_set == 0 precisely. */
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(device);
	put_ldev(device);
	return s;
}

size_t drbd_bm_words(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}

/* merge @number words from @buffer into the bitmap, starting at word
 * offset @offset; bits are OR'ed in and bm_set is adjusted.
 * buffer is little endian, as it is on disk and on the wire. */
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy @number words from the bitmap starting at word offset @offset
 * into @buffer. buffer[i] will be little endian unsigned long. */
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
	unsigned long flags;

	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
}

/* bv_page may be a copy, or may be the original */
static void drbd_bm_endio(struct bio *bio)
{
	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (bio->bi_status) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = blk_status_to_errno(bio->bi_status);
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version. */
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
					bio->bi_status, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	}
}

static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;

	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		page = mempool_alloc(&drbd_md_io_page_pool,
				GFP_NOIO | __GFP_HIGHMEM);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio_set_dev(bio, device->ldev->md_bdev);
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;
	bio_set_op_attrs(bio, op, 0);

	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio_io_error(bio);
	} else {
		submit_bio(bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}
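
/* On-disk placement, worked through (illustration only): with 4 KiB
 * pages, PAGE_SHIFT - 9 == 3, so bitmap page N starts N * 8 sectors
 * after md_offset + bm_offset; page 5 is written at sector offset 40
 * into the bitmap area, one full page long unless cut short by
 * drbd_md_last_sector(). */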

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct drbd_bm_aio_ctx *ctx;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct drbd_bm_aio_ctx) {
		.device = device,
		.start_jif = jiffies,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = KREF_INIT(2),
	};

	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	if (0 == (ctx->flags & ~BM_AIO_READ))
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&ctx->list, &device->pending_bitmap_io);
	spin_unlock_irq(&device->resource->req_lock);

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */

	if (flags & BM_AIO_READ) {
		for (i = 0; i < num_pages; i++) {
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
			cond_resched();
		}
	} else if (flags & BM_AIO_WRITE_HINTED) {
		/* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
		unsigned int hint;
		for (hint = 0; hint < b->n_bitmap_hints; hint++) {
			i = b->al_bitmap_hints[hint];
			/* bitmap flushes before resize, may have been cleared in the meantime */
			if (i >= num_pages)
				continue;
			/* Several AL-extents may point to the same page. */
			if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
			    &page_private(b->bm_pages[i])))
				continue;
			/* Has it even changed? */
			if (bm_test_page_unchanged(b->bm_pages[i]))
				continue;
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
		}
	} else {
		for (i = 0; i < num_pages; i++) {
			/* ignore completely unchanged pages */
			if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
				break;
			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
			++count;
			cond_resched();
		}
	}

	/*
	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0) {
		unsigned int ms = jiffies_to_msecs(jiffies - now);
		if (ms > 5) {
			drbd_info(device, "bitmap %s of %u pages took %u ms\n",
				 (flags & BM_AIO_READ) ? "READ" : "WRITE",
				 count, ms);
		}
	}

	if (ctx->error) {
		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		err = -EIO;
	}

	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk timeout/force-detach during IO... */

	now = jiffies;
	if (flags & BM_AIO_READ) {
		b->bm_set = bm_count_bits(b);
		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if ((flags & ~BM_AIO_READ) == 0)
		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 */
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_READ, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages, whether they have changed or not.
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @device:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages at.
 */
int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this copies the bitmap pages to
 * temporary writeout pages before submitting them, so the live bitmap
 * may be modified while the writeout is in flight; pages changed in the
 * meantime are simply written out again on the next occasion.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @device:	DRBD device.
 */
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_device *device,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	if (!expect(b))
		return i;
	if (!expect(b->bm_pages))
		return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	i = __bm_find_next(device, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 0);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG)
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	int c = 0;

	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(device);

	c = __bm_change_bits_to(device, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(device, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(device, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);

	/* hweight_long() each word first, then assign ~0UL, so we can
	 * account for the number of bits that actually changed */
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
	if (changed) {
		/* We only need lazy writeout, the information is still in the
		 * remote bitmap as well, and is reconstructed during the next
		 * bitmap exchange, if lost locally due to a crash. */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}

/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* if el is exactly at a page boundary, last_word is 0 and last_page
	 * is one beyond the pages we actually touched (and possibly beyond
	 * what is allocated); do not touch it in that case. */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
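
/* Range split, worked through (illustration only, 64bit longs):
 * for s == 3, e == 1000:
 *	sl = 64, el = (1000+1) & ~63 = 960,
 * so bits 3..63 are set bit by bit, the 14 words covering bits 64..959
 * are assigned whole, and bits 960..1000 are again set bit by bit.
 */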

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else {
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* inherently racy:
 * the return value may already be out of date when this function returns.
 * Callers typically only care whether it is zero, or already hold the
 * corresponding bitmap extent by other means (lru cache).
 * @enr is the bitmap extent number. */
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		count += bitmap_weight(bm, n * BITS_PER_LONG);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}
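/* Extent weight example (illustration only, assuming the default
 * constants as above, S2W(enr) == enr << 6): drbd_bm_e_weight(device, 2)
 * counts the set bits in bitmap words 128..191, i.e. in the bits that
 * cover one 16 MiB slice of the device. */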