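/*
 * drbd_bitmap.c
 *
 * In-memory handling of the DRBD on-disk bitmap: one bit per BM_BLOCK_SIZE
 * chunk of backing storage, kept in an array of individually allocated
 * pages so it can be read from and written to the meta-data area one page
 * at a time.
 */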
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include "drbd_int.h"
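/*
 * struct drbd_bitmap - in-core representation of the on-disk bitmap.
 *
 * Access rules, as implemented below: bm_lock (spinlock) protects the bits
 * and the counters bm_set/bm_bits/bm_words; bm_change (mutex) is taken via
 * drbd_bm_lock()/drbd_bm_unlock() around operations that repopulate or
 * resize the whole bitmap; bm_io_wait is used to wait for the per-page
 * BM_PAGE_IO_LOCK bit defined further down.
 */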
struct drbd_bitmap {
	struct page **bm_pages;		/* the bitmap pages themselves */
	spinlock_t bm_lock;

	unsigned long bm_set;		/* number of bits currently set */
	unsigned long bm_bits;		/* number of valid bits */
	size_t bm_words;		/* number of unsigned longs covering bm_bits */
	size_t bm_number_of_pages;
	sector_t bm_dev_capacity;	/* device size (in sectors) this bitmap was sized for */
	struct mutex bm_change;		/* serializes resize and other bulk operations */

	wait_queue_head_t bm_io_wait;	/* waiters for the per-page IO lock bit */

	enum bm_flag bm_flags;

	/* who currently has the bitmap locked, and why (diagnostics only) */
	char *bm_why;
	struct task_struct *bm_task;
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
		 current->comm, task_pid_nr(current),
		 func, b->bm_why ?: "?",
		 b->bm_task->comm, task_pid_nr(b->bm_task));
}

void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = device->bitmap;
	int trylock_failed;

	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
			  current->comm, task_pid_nr(current),
			  why, b->bm_why ?: "?",
			  b->bm_task->comm, task_pid_nr(b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}
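/*
 * Per-page state is kept in page_private() of each bitmap page: the low
 * 24 bits hold the page's index within the bitmap, the high bits are used
 * as flag bits (IO lock, IO error, writeout hints) defined below.
 */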
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)	/* page index in the low 24 bits */
#define BM_PAGE_IO_LOCK		31	/* page is currently under IO */
#define BM_PAGE_IO_ERROR	30	/* IO on this page failed */
#define BM_PAGE_NEED_WRITEOUT	29	/* page changed since the last writeout */
#define BM_PAGE_LAZY_WRITEOUT	28	/* page only needs lazy writeout */
#define BM_PAGE_HINT_WRITEOUT	27	/* writeout requested via drbd_bm_mark_for_writeout() */

/* remember which page of the bitmap this is, for use by the endio handler */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}

/* serialize IO per bitmap page, using a flag bit in its page_private */
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&device->bitmap->bm_io_wait);
}
/* clear the "changed" flags for this page */
static void bm_set_page_unchanged(struct page *page)
{
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
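/*
 * drbd_bm_mark_for_writeout() - remember that this bitmap page should be
 * written out on the next "hinted" writeout (see drbd_bm_write_hinted()).
 * A bogus page number is only reported, not acted on.
 */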
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
	struct page *page;
	if (page_nr >= device->bitmap->bm_number_of_pages) {
		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
			  page_nr, (int)device->bitmap->bm_number_of_pages);
		return;
	}
	page = device->bitmap->bm_pages[page_nr];
	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

/* on which page of the page array does this word live? */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word * sizeof(long)) >> PAGE_SHIFT */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr / 8) >> PAGE_SHIFT */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}

static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}

static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr);
}
/* word offset of the first long covering bitmap extent @s */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

/* word offset within its page ("modulo longs per page") */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* longs (words) per page */
#define LWPP (PAGE_SIZE/sizeof(long))
static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
				 i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}
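/*
 * bm_realloc_pages() - (re)allocate the array of page pointers and any
 * additionally needed bitmap pages for a bitmap of "want" pages.  The
 * pointer array is allocated with kzalloc, falling back to vmalloc; each
 * newly allocated page gets its index stored in page_private().  Returns
 * NULL on allocation failure, leaving the old array untouched.
 */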
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* GFP_NOIO: these allocations happen in the IO/attach path, so the
	 * allocator must not recurse into block IO to reclaim memory.
	 * Try kzalloc first, fall back to vmalloc for large arrays. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes,
				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* store the index so drbd_bm_endio() can tell which
			 * bitmap page completed */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* The now surplus old pages are not freed here; drbd_bm_resize()
		 * frees them after swapping in the new array under bm_lock. */
	}

	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
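/*
 * drbd_bm_init() - allocate and initialize device->bitmap.
 * The bitmap pages themselves are only allocated later, by drbd_bm_resize().
 */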
int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	device->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return 0;
	return device->bitmap->bm_dev_capacity;
}

void drbd_bm_cleanup(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return;
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
	kfree(device->bitmap);
	device->bitmap = NULL;
}
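/*
 * The last word and the last page of the bitmap usually cover more bits
 * than bm_bits.  bm_clear_surplus() clears those surplus bits and returns
 * how many of them were set; bm_set_surplus() sets them, and is used by
 * drbd_bm_resize() when growing with set_new_bits, so that the area beyond
 * the old end starts out as "out of sync".
 */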
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask of the used bits in the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* the bitmap is stored little endian, on disk and in core alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* mask != 0: bm points to the long containing the last bit,
		 * clear everything above that bit */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, clear the padding long as well */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask of the used bits in the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* the bitmap is stored little endian, on disk and in core alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* set all bits above the last valid bit in its word */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, set the padding long as well */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* count the set bits, one page at a time, masking out the bits beyond
 * bm_bits in the last word */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) - 1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}
/* set @len words starting at word @offset to byte value @c */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		pr_alert("bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
				 p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/* number of bits that fit into the on-disk bitmap area of the meta-data */
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
	u64 bitmap_sectors;
	if (ldev->md.al_offset == 8)
		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
	else
		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
	/* sectors * 512 bytes/sector * 8 bits/byte */
	return bitmap_sectors << (9 + 3);
}
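/*
 * drbd_bm_resize() - make sure the bitmap has enough room for the attached
 * storage, then (re)count the set bits.
 * @capacity: new device size in sectors; 0 releases all bitmap pages.
 * @set_new_bits: when growing, mark the newly covered area out-of-sync
 *	(all bits set) instead of clean.
 * Takes drbd_bm_lock() itself; returns 0 on success, -ENOMEM on allocation
 * failure, -ENOSPC if the on-disk meta-data area is too small.
 */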
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages;
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
		  (unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* Round the word count up to a multiple of 64 bit, so that 32-bit and
	 * 64-bit hosts agree on the size of the bitmap on disk and on the
	 * wire. */
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
	}

	if (want < have) {
		/* implicitly: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}
/* return the current number of set (out-of-sync) bits, under bm_lock */
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
	unsigned long s;
	/* without a local disk there are no out-of-sync bits to report */
	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(device);
	put_ldev(device);
	return s;
}

size_t drbd_bm_words(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}
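/*
 * drbd_bm_merge_lel() - OR a buffer of little-endian longs (e.g. as received
 * from the peer) into the bitmap, starting at word @offset, adjusting bm_set
 * accordingly.  If this reaches the last word, surplus bits beyond bm_bits
 * are cleared again.
 */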
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
		       unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end > b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* If this covered the last word, re-clear the surplus bits beyond
	 * bm_bits and fix up bm_set. */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
/* copy @number words of the bitmap, starting at word @offset, into @buffer;
 * the in-memory bitmap is little endian, same as the on-disk format */
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end > b->bm_words) ||
	    (number <= 0))
		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
			 (unsigned long) offset,
			 (unsigned long) number,
			 (unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
	unsigned long flags;

	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
}
/* the page in bv_page may be a copy, or may be the original bitmap page */
static void drbd_bm_endio(struct bio *bio)
{
	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (bio->bi_error) {
		/* remember the error so bm_rw() can report it */
		ctx->error = bio->bi_error;
		bm_set_page_io_err(b->bm_pages[idx]);
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
				 bio->bi_error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	}
}

static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;
	unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;

	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* the last page may extend beyond the end of the on-disk bitmap area;
	 * clip the IO length accordingly */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before submitting, reset the "changed" flags; anything modified
	 * after this point is picked up by a later writeout */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = device->ldev->md_bdev;
	bio->bi_iter.bi_sector = on_disk_sector;
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;

	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_io_error(bio);
	} else {
		submit_bio(rw, bio);
		/* account these sectors as resync-related device activity */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}
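/*
 * bm_rw() - read the whole bitmap from, or write (parts of) it to, its
 * on-disk location, one page at a time and asynchronously.
 * @flags: BM_AIO_* flags selecting read vs. write and which pages to write
 *	(all, changed only, hinted only, or via copies of the pages).
 * @lazy_writeout_upper_idx: if non-zero, only write pages below this index
 *	that are marked for lazy writeout.
 * Waits for all submitted IO to complete; returns 0, or a negative error
 * code (-ENOMEM, -ENODEV, -EIO).
 */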
static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct drbd_bm_aio_ctx *ctx;
	struct drbd_bitmap *b = device->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/* The caller holds an ldev reference, which protects us against the
	 * bitmap being freed or resized.  Plain reads and writes expect the
	 * bitmap to be locked (see the WARN_ON below); writeout via page
	 * copies (BM_AIO_COPY_PAGES) tolerates concurrent modifications. */
	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct drbd_bm_aio_ctx) {
		.device = device,
		.start_jif = jiffies,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(device, D_ATTACHING)) {	/* put is in drbd_bm_aio_ctx_destroy() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	if (0 == (ctx->flags & ~BM_AIO_READ))
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&ctx->list, &device->pending_bitmap_io);
	spin_unlock_irq(&device->resource->req_lock);

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* submit one bio per page to be read or written */
	for (i = 0; i < num_pages; i++) {
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (!(flags & BM_AIO_READ)) {
			/* "hinted" writeout: only pages explicitly marked */
			if ((flags & BM_AIO_WRITE_HINTED) &&
			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
				    &page_private(b->bm_pages[i])))
				continue;
			/* ignore completely unchanged pages */
			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore pages not marked for lazy writeout */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i);
		++count;
		cond_resched();
	}

	/* in_flight was initialized to one, so the completion of the last bio
	 * cannot race with us dropping to zero here; drop our own reference
	 * now, and only wait if real IO is still pending */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);

	/* summary line, only for a plain full write (drbd_bm_write()) */
	if (flags == 0)
		drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
			  (flags & BM_AIO_READ) ? "READ" : "WRITE",
			  count, jiffies - now);

	if (ctx->error) {
		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		err = -EIO;
	}

	/* if IO is still outstanding, we were woken up by force-detach */
	if (atomic_read(&ctx->in_flight))
		err = -EIO;

	now = jiffies;
	if (flags & BM_AIO_READ) {
		b->bm_set = bm_count_bits(b);
		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
			  jiffies - now);
	}
	now = b->bm_set;

	if ((flags & ~BM_AIO_READ) == 0)
		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
			  ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	return err;
}
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 */
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_READ, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since the last writeout.
 */
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages, even unchanged ones.
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages marked for lazy writeout.
 * @device:	DRBD device.
 * @upper_idx:	0 to consider all pages; otherwise only changed pages below
 *	this index that are marked for lazy writeout are written, as copies.
 */
int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since the last writeout, but
 * submits copies of the in-memory pages, so the bitmap may be modified
 * while the writeout is in flight.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write only the bitmap pages previously marked
 *	via drbd_bm_mark_for_writeout(), using page copies.
 * @device:	DRBD device.
 */
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
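/*
 * __bm_find_next() - find the next set (or, with @find_zero_bit, clear) bit
 * at or after @bm_fo, scanning page by page.  Returns the bit number, or
 * DRBD_END_OF_BITMAP if there is none.  Callers serialize access either via
 * the bm_lock spinlock (bm_find_next()) or, for the _drbd_bm_find_next*()
 * variants further down, by holding the whole bitmap locked via
 * drbd_bm_lock().
 */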
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
				    const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_device *device,
				  unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	if (!expect(b))
		return i;
	if (!expect(b->bm_pages))
		return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	i = __bm_find_next(device, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 0);
}

#if 0
/* currently unused */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif

/* lockless variant; the caller serializes access via drbd_bm_lock() */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return __bm_find_next(device, bm_fo, 0);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return __bm_find_next(device, bm_fo, 1);
}
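/*
 * __bm_change_bits_to() - set (val != 0) or clear (val == 0) all bits in the
 * closed range [s, e], update bm_set, and mark touched pages for writeout
 * (lazy writeout when bits were only cleared).  Returns the change in the
 * number of set bits, which is negative when bits were cleared.  Caller
 * holds bm_lock.
 */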
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
			       unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
			 s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits - 1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* same as __bm_change_bits_to(), but with the bitmap spinlock held */
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
			     const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	int c = 0;

	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(device);

	c = __bm_change_bits_to(device, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns the number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(device, s, e, 1);
}

/* returns the number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(device, s, e, 0);
}

/* set all bits of the full words [first_word, last_word) in the page
 * indexed by page_nr */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
	if (changed) {
		/* mark the page for (lazy) writeout and account the newly
		 * set bits */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}
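/*
 * _drbd_bm_set_bits() - set bits [s, e]; optimized for a large contiguous
 * range: the partial words at both ends go through __bm_change_bits_to(),
 * full words in between are set page by page via
 * bm_set_full_words_within_one_page(), dropping the spinlock between pages.
 */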
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s, BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* a small range: not worth the word-wise optimization */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* the range is large enough that sl and el are meaningful */

	spin_lock_irq(&b->bm_lock);

	/* leading bits, up to the first long-aligned bit */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page; LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively the only page, if first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* If el is exactly page aligned, last_word is 0 and last_page may even
	 * point one past the allocated pages; nothing to set there. */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits after the last full word */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
/* test a single bit (NOT a sector!); returns 1 if set, 0 if clear,
 * -1 for the first bit past the end of the bitmap */
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		i = -1;
	} else {
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns the number of bits set within the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* without a bitmap, report the area as dirty (one set bit) rather
	 * than clean */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
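/*
 * drbd_bm_e_weight() - return the number of bits set within bitmap
 * extent @enr, i.e. in words S2W(enr) .. S2W(enr+1)-1 of the bitmap.
 */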
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}