1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/time.h>
15#include <linux/capability.h>
16#include <linux/fs.h>
17#include <linux/jbd2.h>
18#include <linux/ext4_fs.h>
19#include <linux/ext4_jbd2.h>
20#include <linux/quotaops.h>
21#include <linux/buffer_head.h>
22
23#include "group.h"
24
25
26
27
28
29
30
31void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
32 unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
33{
34 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
35 ext4_grpblk_t offset;
36
37 blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
38 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
39 if (offsetp)
40 *offsetp = offset;
41 if (blockgrpp)
42 *blockgrpp = blocknr;
43
44}
45
46
47
/*
 * ext4_init_block_bitmap() - build the in-core block bitmap for a group
 * whose on-disk bitmap was never written (BLOCK_UNINIT groups), marking
 * the blocks occupied by filesystem metadata as in use.
 *
 * @sb:          super block
 * @bh:          locked buffer to fill with the bitmap; may be NULL when
 *               only the free-block count is wanted
 * @block_group: number of the group being initialized
 * @gdp:         group descriptor of @block_group
 *
 * Returns the number of blocks available for data in this group, i.e.
 * the group size minus all metadata blocks.
 */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 int block_group, struct ext4_group_desc *gdp)
{
	unsigned long start;
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/*
		 * If the checksum on the descriptor is bad, something is
		 * badly wrong: poison the in-core counters and mark every
		 * block in the bitmap as used so nothing is allocated
		 * from this group.
		 */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, __FUNCTION__,
				  "Checksum bad for group %u\n", block_group);
			gdp->bg_free_blocks_count = 0;
			gdp->bg_free_inodes_count = 0;
			gdp->bg_itable_unused = 0;
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* bit_max counts the metadata blocks at the start of the group. */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		/*
		 * Old-style layout: a group with a superblock backup also
		 * holds the group descriptor blocks plus the reserved GDT
		 * blocks set aside for online resize.
		 */
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else {
		/*
		 * META_BG layout: the descriptor block for a meta-group
		 * lives in the first, second and last group of that
		 * meta-group (primary + two backups).
		 */
		int group_rel = (block_group -
				 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
				EXT4_DESC_PER_BLOCK(sb);
		if (group_rel == 0 || group_rel == 1 ||
		    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
			bit_max += 1;
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * The last group may be smaller than a full group: its
		 * size is whatever remains after the preceding full
		 * groups are accounted for.
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1));
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		/* Mark the leading metadata blocks as in use. */
		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
			le32_to_cpu(sbi->s_es->s_first_data_block);

		/* Block bitmap, inode bitmap and the inode table blocks. */
		ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
		ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
		for (bit = (ext4_inode_table(sb, gdp) - start),
		     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		/*
		 * If the group is smaller than a full bitmap's worth of
		 * blocks, mark the tail bits (which correspond to no real
		 * blocks) as in use so they are never handed out.
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}

	/* Subtract the inode table and the two (block+inode) bitmaps. */
	return free_blocks - sbi->s_itb_per_group - 2;
}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
147
148
149
150
151
152
153
154
155struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
156 unsigned int block_group,
157 struct buffer_head ** bh)
158{
159 unsigned long group_desc;
160 unsigned long offset;
161 struct ext4_group_desc * desc;
162 struct ext4_sb_info *sbi = EXT4_SB(sb);
163
164 if (block_group >= sbi->s_groups_count) {
165 ext4_error (sb, "ext4_get_group_desc",
166 "block_group >= groups_count - "
167 "block_group = %d, groups_count = %lu",
168 block_group, sbi->s_groups_count);
169
170 return NULL;
171 }
172 smp_rmb();
173
174 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
175 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
176 if (!sbi->s_group_desc[group_desc]) {
177 ext4_error (sb, "ext4_get_group_desc",
178 "Group descriptor not loaded - "
179 "block_group = %d, group_desc = %lu, desc = %lu",
180 block_group, group_desc, offset);
181 return NULL;
182 }
183
184 desc = (struct ext4_group_desc *)(
185 (__u8 *)sbi->s_group_desc[group_desc]->b_data +
186 offset * EXT4_DESC_SIZE(sb));
187 if (bh)
188 *bh = sbi->s_group_desc[group_desc];
189 return desc;
190}
191
192
193
194
195
196
197
198
199
200
201
202struct buffer_head *
203read_block_bitmap(struct super_block *sb, unsigned int block_group)
204{
205 struct ext4_group_desc * desc;
206 struct buffer_head * bh = NULL;
207 ext4_fsblk_t bitmap_blk;
208
209 desc = ext4_get_group_desc(sb, block_group, NULL);
210 if (!desc)
211 return NULL;
212 bitmap_blk = ext4_block_bitmap(sb, desc);
213 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
214 bh = sb_getblk(sb, bitmap_blk);
215 if (!buffer_uptodate(bh)) {
216 lock_buffer(bh);
217 if (!buffer_uptodate(bh)) {
218 ext4_init_block_bitmap(sb, bh, block_group,
219 desc);
220 set_buffer_uptodate(bh);
221 }
222 unlock_buffer(bh);
223 }
224 } else {
225 bh = sb_bread(sb, bitmap_blk);
226 }
227 if (!bh)
228 ext4_error (sb, __FUNCTION__,
229 "Cannot read block bitmap - "
230 "block_group = %d, block_bitmap = %llu",
231 block_group, bitmap_blk);
232 return bh;
233}
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255#if 1
/*
 * __rsv_window_dump() - debugging aid: walk the per-filesystem
 * reservation-window tree in order and sanity-check it.
 *
 * @root:    rbtree of ext4_reserve_window_node, keyed by block range
 * @verbose: nonzero to print every window while walking
 * @fn:      caller name, for the log header
 *
 * Detects two corruptions: a window whose start is at/after its end,
 * and two adjacent windows that overlap.  On the first problem found in
 * quiet mode the walk restarts in verbose mode so the whole map gets
 * printed; if any problem was seen the function BUG()s.
 */
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start:  %llu, end:  %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		/* In-order walk: the previous window must end before this
		 * one starts. */
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
299#define rsv_window_dump(root, verbose) \
300 __rsv_window_dump((root), (verbose), __FUNCTION__)
301#else
302#define rsv_window_dump(root, verbose) do {} while (0)
303#endif
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321static int
322goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
323 unsigned int group, struct super_block * sb)
324{
325 ext4_fsblk_t group_first_block, group_last_block;
326
327 group_first_block = ext4_group_first_block_no(sb, group);
328 group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
329
330 if ((rsv->_rsv_start > group_last_block) ||
331 (rsv->_rsv_end < group_first_block))
332 return 0;
333 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
334 || (grp_goal + group_first_block > rsv->_rsv_end)))
335 return 0;
336 return 1;
337}
338
339
340
341
342
343
344
345
346
347
/*
 * search_reserve_window() - find the reservation window containing
 * @goal, or failing that, the nearest window that starts before it.
 *
 * @root: the filesystem's reservation-window rbtree
 * @goal: target filesystem block
 *
 * Returns NULL only when the tree is empty.  Otherwise returns either
 * the window that contains @goal, or the rightmost window lying before
 * @goal (the natural starting point for a forward search).
 * Caller must hold the rsv_window lock.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);

	/*
	 * The search fell off the tree: rsv is the last node visited.
	 * If it lies after the goal, step back one node so we return
	 * the closest window *before* the goal.
	 * NOTE(review): this assumes rb_prev() is non-NULL here, i.e.
	 * that a window starting after the goal always has a
	 * predecessor on this path — confirm against rbtree semantics.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}
379
380
381
382
383
384
385
386
387void ext4_rsv_window_add(struct super_block *sb,
388 struct ext4_reserve_window_node *rsv)
389{
390 struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
391 struct rb_node *node = &rsv->rsv_node;
392 ext4_fsblk_t start = rsv->rsv_start;
393
394 struct rb_node ** p = &root->rb_node;
395 struct rb_node * parent = NULL;
396 struct ext4_reserve_window_node *this;
397
398 while (*p)
399 {
400 parent = *p;
401 this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);
402
403 if (start < this->rsv_start)
404 p = &(*p)->rb_left;
405 else if (start > this->rsv_end)
406 p = &(*p)->rb_right;
407 else {
408 rsv_window_dump(root, 1);
409 BUG();
410 }
411 }
412
413 rb_link_node(node, parent, p);
414 rb_insert_color(node, root);
415}
416
417
418
419
420
421
422
423
424
425
/*
 * rsv_window_remove() - take a reservation window out of the tree and
 * reset it to the "not allocated" state.
 *
 * The start/end/hit fields are cleared before the rb_erase so the node
 * reads as empty once it is detached.  Caller must hold the
 * rsv_window lock.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}
434
435
436
437
438
439
440
441static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
442{
443
444 return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
445}
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468void ext4_init_block_alloc_info(struct inode *inode)
469{
470 struct ext4_inode_info *ei = EXT4_I(inode);
471 struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
472 struct super_block *sb = inode->i_sb;
473
474 block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
475 if (block_i) {
476 struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;
477
478 rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
479 rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
480
481
482
483
484
485
486 if (!test_opt(sb, RESERVATION))
487 rsv->rsv_goal_size = 0;
488 else
489 rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
490 rsv->rsv_alloc_hit = 0;
491 block_i->last_alloc_logical_block = 0;
492 block_i->last_alloc_physical_block = 0;
493 }
494 ei->i_block_alloc_info = block_i;
495}
496
497
498
499
500
501
502
503
504
505
506
507
508
509
/*
 * ext4_discard_reservation() - drop the inode's block reservation
 * window, if it has one.
 *
 * Uses the double-checked pattern: a cheap unlocked emptiness test
 * first, then a re-check under the rsv_window lock before removal, so
 * the common no-reservation case never takes the lock.
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	/* No alloc-info means no reservation to discard. */
	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		/* Re-check: it may have been removed while unlocked. */
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}
528
529
530
531
532
533
534
535
536
/*
 * ext4_free_blocks_sb() - free a contiguous run of blocks back to the
 * filesystem, journalling both the bitmap and group-descriptor updates.
 *
 * @handle:              journal handle for the current transaction
 * @sb:                  super block
 * @block:               first block to free
 * @count:               number of blocks
 * @pdquot_freed_blocks: out: how many blocks were actually cleared,
 *                       so the caller can credit the quota
 *
 * A run that crosses a group boundary is processed one group at a
 * time via the do_more loop.  Undo access is taken on the bitmap so
 * the freed blocks cannot be reallocated and overwritten before the
 * transaction that frees them commits.
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	unsigned long block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc * desc;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	/* Reject ranges outside the data zone or that wrap around. */
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * If the run extends past the end of this group, trim it and
	 * remember the remainder; the tail is freed by looping back to
	 * do_more once this group is done.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	/* Freeing metadata blocks indicates corruption; report but
	 * carry on clearing the bits as requested. */
	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group))
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = %llu, count = %lu",
			    block, count);

	/*
	 * Undo access (rather than plain write access) keeps a copy of
	 * the committed bitmap so a freed block is not reused until the
	 * freeing transaction has committed.
	 */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/* The group descriptor only needs ordinary write access. */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/* Debug-only tracing of any buffer aliasing this block;
		 * the bh state lock is dropped around the lookup. */
#ifdef CONFIG_JBD2_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No commited data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		/* Don't hog the bh state lock across a long run. */
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}

		/*
		 * Mark the block busy in b_committed_data: this is the
		 * bitmap snapshot of the committing transaction, so
		 * setting the bit there stops the allocator handing out
		 * this block before the free has committed (see the
		 * undo-access comment above).
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * Now actually free the block in the live bitmap.  A bit
		 * that is already clear means a double free: report it,
		 * but keep going with the rest of the range.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __FUNCTION__,
				"bit already cleared for block %llu",
				(ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	/* Credit the freed blocks to the group and the fs-wide counter;
	 * the descriptor checksum must be refreshed under the group
	 * lock. */
	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	/* Journal both modified metadata buffers. */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err) err = ret;
	*pdquot_freed_blocks += group_freed;

	/* Tail that spilled into the next group, if any. */
	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}
716
717
718
719
720
721
722
723
724void ext4_free_blocks(handle_t *handle, struct inode *inode,
725 ext4_fsblk_t block, unsigned long count)
726{
727 struct super_block * sb;
728 unsigned long dquot_freed_blocks;
729
730 sb = inode->i_sb;
731 if (!sb) {
732 printk ("ext4_free_blocks: nonexistent device");
733 return;
734 }
735 ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
736 if (dquot_freed_blocks)
737 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
738 return;
739}
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
762{
763 int ret;
764 struct journal_head *jh = bh2jh(bh);
765
766 if (ext4_test_bit(nr, bh->b_data))
767 return 0;
768
769 jbd_lock_bh_state(bh);
770 if (!jh->b_committed_data)
771 ret = 1;
772 else
773 ret = !ext4_test_bit(nr, jh->b_committed_data);
774 jbd_unlock_bh_state(bh);
775 return ret;
776}
777
778
779
780
781
782
783
784
785
786
787
/*
 * bitmap_search_next_usable_block() - find the next block at or after
 * @start that is free in both the live bitmap and the committed copy.
 *
 * @start:     first candidate bit
 * @bh:        bitmap buffer (with journal head)
 * @maxblocks: one past the last bit to consider
 *
 * Returns the bit number, or -1 if no such block exists.  When a block
 * is free in the live bitmap but busy in b_committed_data, the scan
 * jumps ahead using the committed bitmap to avoid re-testing every
 * intermediate bit.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
					ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		/* Free in the live bitmap but not committed-free: skip
		 * forward to the next committed-free bit and retry. */
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}
809
810
811
812
813
814
815
816
817
818
819
820
821
822
/*
 * find_next_usable_block() - heuristic search for an allocatable block
 * near @start in bitmap @bh.
 *
 * @start:     preferred starting bit, or <=0 for "anywhere"
 * @bh:        bitmap buffer
 * @maxblocks: one past the last bit to consider
 *
 * Strategy, cheapest first:
 *  1. look for a free bit within the same 64-bit word as the goal;
 *  2. memscan() for the first fully-zero byte (8 free blocks in a row);
 *  3. fall back to the exhaustive bit-by-bit search.
 * Returns the bit number, or -1 if nothing is available.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
			ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * Search only up to the next 64-bit boundary: blocks in
		 * the same word as the goal are "near enough" to keep
		 * the allocation local.
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	/* Scan whole bytes for one that is completely zero. */
	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The byte-scan found nothing usable (or the candidate failed
	 * the committed-data check); do the slow exhaustive search.
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}
867
868
869
870
871
872
873
874
875
876
877
878
/*
 * claim_block() - atomically try to take block @block in bitmap @bh.
 *
 * Sets the bit in the live bitmap under @lock; if the bit was already
 * set we lost the race and return 0.  If the set succeeded but the
 * block is still busy in the committed copy (freed in an uncommitted
 * transaction), the claim is rolled back — clearing the bit again —
 * and 0 is returned.  Returns 1 on a successful claim.
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) {
		/* Not yet safe to reuse: undo our claim. */
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
/*
 * ext4_try_to_allocate() - attempt to allocate up to *count contiguous
 * blocks within one group, optionally constrained to a reservation
 * window.
 *
 * @sb:        super block
 * @handle:    journal handle (unused here; kept for symmetry)
 * @group:     group to allocate from
 * @bitmap_bh: the group's block bitmap
 * @grp_goal:  preferred block (group-relative), or <0 for none
 * @count:     in: blocks wanted; out: blocks actually obtained
 * @my_rsv:    reservation window limiting the search, or NULL
 *
 * Returns the group-relative block number of the first allocated block,
 * or -1 on failure (with *count set to 0 in that case).
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
			struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
			unsigned long *count, struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* Clip the search range to the reservation window, if any. */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* Window begins in an earlier group. */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* Window extends past this group's end. */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		/* Honour the goal only when it lies inside the window. */
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			/*
			 * No reservation: slide the goal back over up to
			 * 7 free bits so the allocation starts at the
			 * beginning of a free byte where possible.
			 */
			for (i = 0; i < 7 && grp_goal > start &&
					ext4_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * Someone else beat us to this block; step past it and
		 * search again from there.
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	/* Greedily claim following free blocks up to *count. */
	while (num < *count && grp_goal < end
		&& ext4_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT4_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
/*
 * find_next_reservable_window() - find a gap between existing
 * reservation windows big enough for my_rsv's goal size, and move
 * my_rsv there.
 *
 * @search_head: window to start searching from (from
 *               search_reserve_window())
 * @my_rsv:      the window being (re)placed
 * @sb:          super block
 * @start_block: lowest acceptable start block
 * @last_block:  highest acceptable start block
 *
 * Returns 0 with my_rsv repositioned (and reinserted into the tree if
 * it moved), or -1 if no suitable gap exists in [start_block,
 * last_block].  Caller must hold the rsv_window lock.
 */
static int find_next_reservable_window(
				struct ext4_reserve_window_node *search_head,
				struct ext4_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext4_fsblk_t start_block,
				ext4_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext4_reserve_window_node *rsv, *prev;
	ext4_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		/* Candidate start: just past the current window. */
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* Ran out of the permitted range: give up. */
		if (cur > last_block)
			return -1;

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		/*
		 * rb_entry() is pure pointer arithmetic, so computing
		 * rsv from a NULL next is harmless as long as we break
		 * before dereferencing it (checked just below).
		 */
		rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);

		/* No window after this one: the gap extends to the end
		 * of the range, which we already know fits cur. */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/* Found a gap of at least @size blocks between
			 * prev and rsv. */
			break;
		}
	}

	/*
	 * If my_rsv is not the window immediately before the gap, it
	 * must be detached before being re-ranged, because changing its
	 * start/end in place would violate the tree's ordering.
	 */
	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Place the window at the start of the gap.  Note the window
	 * may extend past last_block; only the start is constrained.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext4_rsv_window_add(sb, my_rsv);

	return 0;
}
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
/*
 * alloc_new_reservation() - establish (or re-establish) a reservation
 * window for an inode inside block group @group, preferably containing
 * @grp_goal, and containing at least one block that is actually free in
 * the group's bitmap.
 *
 * @my_rsv:    the inode's reservation window node
 * @grp_goal:  group-relative goal block, or <0 for none
 * @sb:        super block
 * @group:     group to reserve inside
 * @bitmap_bh: the group's block bitmap
 *
 * Returns 0 on success, -1 on failure (in which case any old window is
 * discarded).  Takes and releases the rsv_window lock internally; the
 * bitmap is scanned with the lock dropped.
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
		ext4_grpblk_t grp_goal, struct super_block *sb,
		unsigned int group, struct buffer_head *bitmap_bh)
{
	struct ext4_reserve_window_node *search_head;
	ext4_fsblk_t group_first_block, group_end_block, start_block;
	ext4_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * We already have a window.  If it straddles the end of
		 * this group and the goal lies inside it, there is no
		 * point allocating a new window in this group — the
		 * caller should move on (the usable part of the window
		 * belongs to the next group).
		 */
		if ((my_rsv->rsv_start <= group_end_block) &&
			(my_rsv->rsv_end > group_end_block) &&
			(start_block >= my_rsv->rsv_start))
			return -1;

		/*
		 * The old window served more than half its blocks:
		 * allocation here is dense, so double the goal size for
		 * the replacement window (capped at the maximum).
		 */
		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			size = size * 2;
			if (size > EXT4_MAX_RESERVE_BLOCKS)
				size = EXT4_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size= size;
		}
	}

	spin_lock(rsv_lock);
	/* Start searching from the window nearest to start_block. */
	search_head = search_reserve_window(fs_rsv_root, start_block);

retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
						start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * We hold a window in the tree, but it is only useful if it
	 * contains at least one genuinely free block.  Drop the lock
	 * (bitmap scanning may block) and check the bitmap; the window
	 * itself keeps other inodes from reserving the same range.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * No free block anywhere from the window's start to the
		 * end of the group: discard the window and fail.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	start_block = first_free_block + group_first_block;
	/*
	 * If the first free block falls inside the window, we are done;
	 * otherwise retry the search starting from our own window
	 * (which now precedes the free block).
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;

	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
1302 struct super_block *sb, int size)
1303{
1304 struct ext4_reserve_window_node *next_rsv;
1305 struct rb_node *next;
1306 spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
1307
1308 if (!spin_trylock(rsv_lock))
1309 return;
1310
1311 next = rb_next(&my_rsv->rsv_node);
1312
1313 if (!next)
1314 my_rsv->rsv_end += size;
1315 else {
1316 next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
1317
1318 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1319 my_rsv->rsv_end += size;
1320 else
1321 my_rsv->rsv_end = next_rsv->rsv_start - 1;
1322 }
1323 spin_unlock(rsv_lock);
1324}
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
/*
 * ext4_try_to_allocate_with_rsv() - allocate up to *count blocks in
 * @group, going through the inode's reservation window when one is in
 * use, and journalling the bitmap update.
 *
 * @sb:        super block
 * @handle:    journal handle
 * @group:     group to allocate from
 * @bitmap_bh: the group's block bitmap
 * @grp_goal:  group-relative goal block, or <0
 * @my_rsv:    the inode's reservation window node, or NULL to bypass
 *             the reservation mechanism entirely
 * @count:     in: blocks wanted; out: blocks obtained
 * @errp:      out: 0 or a fatal journalling error
 *
 * Returns the group-relative first block allocated, or -1 on failure.
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			unsigned int group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node * my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Undo access on the bitmap: an allocated block must not be
	 * handed back out if this transaction aborts before commit.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/* No reservation window: plain direct allocation. */
	if (my_rsv == NULL ) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Loop: make sure we have a usable window covering the goal (or
	 * at least this group), then try to allocate inside it.  On an
	 * in-window failure a fresh window is allocated and we retry.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			/* Need a (new) window; ask for at least *count
			 * blocks so the whole request can fit. */
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			/* Window exists and holds the goal; extend it if
			 * it is too small for the full request. */
			int curr = my_rsv->rsv_end -
				   (grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							  *count - curr);
		}

		/* The window must overlap this group at this point. */
		if ((my_rsv->rsv_start > group_last_block) ||
		    (my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	/* Allocation failed: release our journal claim on the bitmap. */
	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
1469
1470
1471
1472
1473
1474
1475
1476static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
1477{
1478 ext4_fsblk_t free_blocks, root_blocks;
1479
1480 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1481 root_blocks = ext4_r_blocks_count(sbi->s_es);
1482 if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1483 sbi->s_resuid != current->fsuid &&
1484 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
1485 return 0;
1486 }
1487 return 1;
1488}
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502int ext4_should_retry_alloc(struct super_block *sb, int *retries)
1503{
1504 if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
1505 return 0;
1506
1507 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1508
1509 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
1510}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
1527 ext4_fsblk_t goal, unsigned long *count, int *errp)
1528{
1529 struct buffer_head *bitmap_bh = NULL;
1530 struct buffer_head *gdp_bh;
1531 unsigned long group_no;
1532 int goal_group;
1533 ext4_grpblk_t grp_target_blk;
1534 ext4_grpblk_t grp_alloc_blk;
1535 ext4_fsblk_t ret_block;
1536 int bgi;
1537 int fatal = 0, err;
1538 int performed_allocation = 0;
1539 ext4_grpblk_t free_blocks;
1540 struct super_block *sb;
1541 struct ext4_group_desc *gdp;
1542 struct ext4_super_block *es;
1543 struct ext4_sb_info *sbi;
1544 struct ext4_reserve_window_node *my_rsv = NULL;
1545 struct ext4_block_alloc_info *block_i;
1546 unsigned short windowsz = 0;
1547#ifdef EXT4FS_DEBUG
1548 static int goal_hits, goal_attempts;
1549#endif
1550 unsigned long ngroups;
1551 unsigned long num = *count;
1552
1553 *errp = -ENOSPC;
1554 sb = inode->i_sb;
1555 if (!sb) {
1556 printk("ext4_new_block: nonexistent device");
1557 return 0;
1558 }
1559
1560
1561
1562
1563 if (DQUOT_ALLOC_BLOCK(inode, num)) {
1564 *errp = -EDQUOT;
1565 return 0;
1566 }
1567
1568 sbi = EXT4_SB(sb);
1569 es = EXT4_SB(sb)->s_es;
1570 ext4_debug("goal=%lu.\n", goal);
1571
1572
1573
1574
1575
1576
1577
1578
1579 block_i = EXT4_I(inode)->i_block_alloc_info;
1580 if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1581 my_rsv = &block_i->rsv_window_node;
1582
1583 if (!ext4_has_free_blocks(sbi)) {
1584 *errp = -ENOSPC;
1585 goto out;
1586 }
1587
1588
1589
1590
1591 if (goal < le32_to_cpu(es->s_first_data_block) ||
1592 goal >= ext4_blocks_count(es))
1593 goal = le32_to_cpu(es->s_first_data_block);
1594 ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
1595 goal_group = group_no;
1596retry_alloc:
1597 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1598 if (!gdp)
1599 goto io_error;
1600
1601 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1602
1603
1604
1605
1606 if (my_rsv && (free_blocks < windowsz)
1607 && (rsv_is_empty(&my_rsv->rsv_window)))
1608 my_rsv = NULL;
1609
1610 if (free_blocks > 0) {
1611 bitmap_bh = read_block_bitmap(sb, group_no);
1612 if (!bitmap_bh)
1613 goto io_error;
1614 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1615 group_no, bitmap_bh, grp_target_blk,
1616 my_rsv, &num, &fatal);
1617 if (fatal)
1618 goto out;
1619 if (grp_alloc_blk >= 0)
1620 goto allocated;
1621 }
1622
1623 ngroups = EXT4_SB(sb)->s_groups_count;
1624 smp_rmb();
1625
1626
1627
1628
1629
1630 for (bgi = 0; bgi < ngroups; bgi++) {
1631 group_no++;
1632 if (group_no >= ngroups)
1633 group_no = 0;
1634 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1635 if (!gdp)
1636 goto io_error;
1637 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1638
1639
1640
1641
1642
1643 if (free_blocks <= (windowsz/2))
1644 continue;
1645
1646 brelse(bitmap_bh);
1647 bitmap_bh = read_block_bitmap(sb, group_no);
1648 if (!bitmap_bh)
1649 goto io_error;
1650
1651
1652
1653 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1654 group_no, bitmap_bh, -1, my_rsv,
1655 &num, &fatal);
1656 if (fatal)
1657 goto out;
1658 if (grp_alloc_blk >= 0)
1659 goto allocated;
1660 }
1661
1662
1663
1664
1665
1666
1667
1668 if (my_rsv) {
1669 my_rsv = NULL;
1670 windowsz = 0;
1671 group_no = goal_group;
1672 goto retry_alloc;
1673 }
1674
1675 *errp = -ENOSPC;
1676 goto out;
1677
1678allocated:
1679
1680 ext4_debug("using block group %d(%d)\n",
1681 group_no, gdp->bg_free_blocks_count);
1682
1683 BUFFER_TRACE(gdp_bh, "get_write_access");
1684 fatal = ext4_journal_get_write_access(handle, gdp_bh);
1685 if (fatal)
1686 goto out;
1687
1688 ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);
1689
1690 if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
1691 in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
1692 in_range(ret_block, ext4_inode_table(sb, gdp),
1693 EXT4_SB(sb)->s_itb_per_group) ||
1694 in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
1695 EXT4_SB(sb)->s_itb_per_group))
1696 ext4_error(sb, "ext4_new_block",
1697 "Allocating block in system zone - "
1698 "blocks from %llu, length %lu",
1699 ret_block, num);
1700
1701 performed_allocation = 1;
1702
1703#ifdef CONFIG_JBD2_DEBUG
1704 {
1705 struct buffer_head *debug_bh;
1706
1707
1708 debug_bh = sb_find_get_block(sb, ret_block);
1709 if (debug_bh) {
1710 BUFFER_TRACE(debug_bh, "state when allocated");
1711 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1712 brelse(debug_bh);
1713 }
1714 }
1715 jbd_lock_bh_state(bitmap_bh);
1716 spin_lock(sb_bgl_lock(sbi, group_no));
1717 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1718 int i;
1719
1720 for (i = 0; i < num; i++) {
1721 if (ext4_test_bit(grp_alloc_blk+i,
1722 bh2jh(bitmap_bh)->b_committed_data)) {
1723 printk("%s: block was unexpectedly set in "
1724 "b_committed_data\n", __FUNCTION__);
1725 }
1726 }
1727 }
1728 ext4_debug("found bit %d\n", grp_alloc_blk);
1729 spin_unlock(sb_bgl_lock(sbi, group_no));
1730 jbd_unlock_bh_state(bitmap_bh);
1731#endif
1732
1733 if (ret_block + num - 1 >= ext4_blocks_count(es)) {
1734 ext4_error(sb, "ext4_new_block",
1735 "block(%llu) >= blocks count(%llu) - "
1736 "block_group = %lu, es == %p ", ret_block,
1737 ext4_blocks_count(es), group_no, es);
1738 goto out;
1739 }
1740
1741
1742
1743
1744
1745
1746 ext4_debug("allocating block %lu. Goal hits %d of %d.\n",
1747 ret_block, goal_hits, goal_attempts);
1748
1749 spin_lock(sb_bgl_lock(sbi, group_no));
1750 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1751 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1752 gdp->bg_free_blocks_count =
1753 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1754 gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
1755 spin_unlock(sb_bgl_lock(sbi, group_no));
1756 percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1757
1758 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1759 err = ext4_journal_dirty_metadata(handle, gdp_bh);
1760 if (!fatal)
1761 fatal = err;
1762
1763 sb->s_dirt = 1;
1764 if (fatal)
1765 goto out;
1766
1767 *errp = 0;
1768 brelse(bitmap_bh);
1769 DQUOT_FREE_BLOCK(inode, *count-num);
1770 *count = num;
1771 return ret_block;
1772
1773io_error:
1774 *errp = -EIO;
1775out:
1776 if (fatal) {
1777 *errp = fatal;
1778 ext4_std_error(sb, fatal);
1779 }
1780
1781
1782
1783 if (!performed_allocation)
1784 DQUOT_FREE_BLOCK(inode, *count);
1785 brelse(bitmap_bh);
1786 return 0;
1787}
1788
1789ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
1790 ext4_fsblk_t goal, int *errp)
1791{
1792 unsigned long count = 1;
1793
1794 return ext4_new_blocks(handle, inode, goal, &count, errp);
1795}
1796
1797
1798
1799
1800
1801
1802
/*
 * ext4_count_free_blocks() -- sum the free-block counts of all block
 * group descriptors on @sb.
 *
 * With EXT4FS_DEBUG defined, the on-disk block bitmaps are read as
 * well: per-group stored vs. counted values are printed via printk
 * and the bitmap-derived total is returned, so descriptor/bitmap
 * disagreements show up in the log.  Without EXT4FS_DEBUG only the
 * descriptor fields are summed (no bitmap I/O).
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	int i;
	unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	/* NOTE(review): barrier presumably pairs with an update of
	 * s_groups_count during online resize -- confirm with caller. */
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		/* release the previous group's bitmap before reading the
		 * next; brelse(NULL) is a no-op on the first pass */
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
	       EXT4_FREE_BLOCKS_COUNT(es),
		desc_count, bitmap_count);
	/* debug build returns the count derived from the bitmaps */
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
1855
/*
 * test_root - is @a an integral power of @b (b^1, b^2, ...)?
 *
 * The previous implementation multiplied an accumulator by @b until it
 * reached @a; for large @a that multiplication can overflow signed int,
 * which is undefined behavior and can spin forever.  Dividing @a down
 * instead never overflows and gives identical answers for every input
 * the old version handled correctly.
 */
static inline int test_root(int a, int b)
{
	while (a > b) {
		if (a % b)	/* not divisible -> cannot be a power */
			return 0;
		a /= b;
	}
	return a == b;
}
1864
/*
 * ext4_group_sparse - does @group carry a sparse-superblock backup?
 *
 * Under the sparse_super layout, backups live in groups 0 and 1 and in
 * groups whose number is a power of 3, 5 or 7.
 */
static int ext4_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))	/* even groups > 1 are never sparse */
		return 0;
	return test_root(group, 3) || test_root(group, 5) ||
	       test_root(group, 7);
}
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883int ext4_bg_has_super(struct super_block *sb, int group)
1884{
1885 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1886 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1887 !ext4_group_sparse(group))
1888 return 0;
1889 return 1;
1890}
1891
/*
 * ext4_bg_num_gdb_meta - descriptor-block backups in @group (META_BG).
 *
 * Within each metagroup only the first, second and last member group
 * hold a (single) copy of the metagroup's descriptor block.
 */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
{
	unsigned long rel = group % EXT4_DESC_PER_BLOCK(sb);

	if (rel == 0 || rel == 1 || rel == EXT4_DESC_PER_BLOCK(sb) - 1)
		return 1;
	return 0;
}
1902
1903static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
1904{
1905 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1906 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1907 !ext4_group_sparse(group))
1908 return 0;
1909 return EXT4_SB(sb)->s_gdb_count;
1910}
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
1922{
1923 unsigned long first_meta_bg =
1924 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
1925 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1926
1927 if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
1928 metagroup < first_meta_bg)
1929 return ext4_bg_num_gdb_nometa(sb,group);
1930
1931 return ext4_bg_num_gdb_meta(sb,group);
1932
1933}
1934