/*
 * Classic ext2/ext3-style indirect block mapping for ext4 inodes that do
 * not use extents: twelve direct blocks in the inode, followed by single,
 * double and triple indirect block trees.
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

/*
 * One step of an indirect-chain walk: @p points at the slot (in the inode's
 * i_data array or in an indirect block) that holds a block number, @key is a
 * copy of that number, and @bh is the buffer containing the slot (NULL when
 * the slot lives in the inode itself).
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

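/*
 * Illustration (not part of the original source): after walking a
 * double-indirect path with ext4_block_to_path()/ext4_get_branch() below,
 * a filled-in chain looks roughly like this:
 *
 *	chain[0].p -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK]   (bh == NULL)
 *	chain[1].p -> slot inside the double-indirect block's bh->b_data
 *	chain[2].p -> slot inside the indirect block's bh->b_data
 *
 * Each chain[i].key caches the little-endian block number stored in
 * *chain[i].p at the time the chain was built.
 */
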
/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question
 *	@i_block: logical block number to be parsed
 *	@offsets: array to store the offsets in each tree level
 *	@boundary: set to the number of data blocks left before the current
 *		   indirect block runs out (useful for read-ahead decisions)
 *
 *	Splits a logical block number into the chain of offsets that must be
 *	followed through the inode's i_data array and the indirect blocks to
 *	reach that block.  Returns the depth of the chain (1 for a direct
 *	block, up to 4 for a triple-indirect one), or 0 if @i_block is beyond
 *	what the indirect scheme can address.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
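
/*
 * Worked example (an illustration, assuming a 4 KiB block size, so
 * EXT4_ADDR_PER_BLOCK == 1024 and EXT4_NDIR_BLOCKS == 12):
 *
 *	i_block = 5	-> depth 1, offsets = { 5 }
 *	i_block = 12	-> depth 2, offsets = { EXT4_IND_BLOCK, 0 }
 *	i_block = 1035	-> depth 2, offsets = { EXT4_IND_BLOCK, 1023 }
 *	i_block = 1036	-> depth 3, offsets = { EXT4_DIND_BLOCK, 0, 0 }
 *
 * In the i_block = 12 case *boundary becomes 1023: all remaining slots of
 * that indirect block map logically contiguous file blocks.
 */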

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Walks the indirect chain described by @offsets, filling @chain as it
 *	goes.  Returns NULL if the whole chain could be followed (the data
 *	block number is then in chain[depth-1].key), or a pointer to the last
 *	filled element of @chain if the walk stopped at a missing block
 *	(->key == 0) or at a read/verification failure (in which case *err is
 *	set).  Every buffer_head referenced from @chain must be released by
 *	the caller.
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* The inode's i_data array is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	Returns a preferred place for a new block: the closest allocated block
 *	preceding the missing one in the same indirect block if there is one,
 *	otherwise the indirect block itself, otherwise a goal derived from the
 *	inode's block group.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find a previous allocated block in the same indirect block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of the indirect block itself */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * The pointer is going to live in the inode itself, so just aim for
	 * the inode's own block group.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Returns the preferred physical block to use as the allocator goal for
 *	the missing block described by @partial.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	goal = ext4_find_near(inode, partial);
	/*
	 * Clamp the goal to the range of physical blocks that a block-mapped
	 * file can actually address.
	 */
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - count direct blocks to allocate for a branch
 *	@branch: chain of indirect blocks
 *	@k: number of indirect blocks that still have to be allocated
 *	@blks: number of data blocks the caller would like to map
 *	@blocks_to_boundary: blocks left before the indirect block boundary
 *
 *	Returns the number of direct blocks to allocate.  If indirect blocks
 *	are missing (@k > 0) we only allocate up to the end of the first new
 *	indirect block; otherwise we extend the allocation while the following
 *	slots in the existing indirect block are still empty.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	if (k > 0) {
		/* right now we don't handle cross-boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
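
/*
 * Example (illustrative): with blks = 8, blocks_to_boundary = 2 and k > 0
 * the function returns 3 - a new branch can only map blocks up to the
 * indirect block boundary.  With k == 0 it also returns at most 3, but it
 * stops earlier if one of the next two slots in branch[0] is already in use.
 */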

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@indirect_blks: number of indirect blocks to allocate
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	Allocates @indirect_blks new indirect blocks plus the data blocks and
 *	links them together: each freshly allocated indirect block already
 *	points to the next one and, at the bottom, to the data blocks.  The
 *	resulting chain is left in @branch but is not yet reachable from the
 *	inode; that final step is done by ext4_splice_branch().  Returns 0 on
 *	success or an error code, in which case everything that was allocated
 *	here is freed again.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			ar->goal = new_blocks[i] =
				ext4_new_meta_blocks(handle, ar->inode,
					ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * Only ext4_forget() the freshly allocated indirect blocks:
		 * branch[0] lives in a pre-existing indirect block (or the
		 * inode), and branch[indirect_blks] holds data blocks.  The
		 * blocks are brand new, so they never need to be revoked,
		 * which is why EXT4_FREE_BLOCKS_METADATA is not passed below.
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto the inode
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of the missing link
 * @num: number of indirect blocks we are adding
 *
 * Fills the missing link and does the housekeeping needed in the inode.
 * On success the full chain to the new block is in place and 0 is returned.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we are splicing into an indirect block (rather than directly
	 * into the inode's i_data), that block must be journalled before we
	 * modify it.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* That's it: this single store makes the new branch reachable. */
	*where->p = where->key;

	/*
	 * If the branch added no new indirect blocks, the newly allocated
	 * data blocks are contiguous and the remaining direct pointers can
	 * be filled in right here.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto an indirect block? */
	if (where->bh) {
		/*
		 * Splicing onto an indirect block does not alter the inode,
		 * so only the indirect block needs to be written out.  When
		 * splicing into the inode itself we must dirty the inode to
		 * store the result permanently, which the else branch does.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * where[i].bh is newly allocated, so there is no need to
		 * revoke the block, which is why EXT4_FREE_BLOCKS_METADATA
		 * is not used here.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}

/*
 * ext4_ind_map_blocks() handles ext4_map_blocks() for inodes that still use
 * the traditional indirect/double-indirect addressing scheme.
 *
 * If allocation is needed, the whole missing branch (indirect blocks plus
 * data blocks) is built first and only then spliced into the tree with a
 * single pointer update, so a failed allocation never leaves half-linked
 * blocks reachable from the inode.
 *
 * @handle may be NULL when EXT4_GET_BLOCKS_CREATE is not set.
 *
 * Returns > 0 (number of blocks mapped or allocated), 0 if a plain lookup
 * found a hole, or < 0 on error.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks while they are contiguous */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/* Size (in blocks) of the hole implied by the missing subtree */
		count = 1;
		for (i = 0; partial + i != chain + depth - 1; i++)
			count *= epb;
		/* Fill in the size of the hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of an indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EFSCORRUPTED;
	}

	/* Set up the allocation request */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/* Build the new branch: indirect blocks plus data blocks */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * ext4_splice_branch() makes the new branch visible with a single
	 * pointer update; if it fails it frees and forgets the blocks that
	 * were just allocated.
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit: the whole chain is now spliced in */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}

/*
 * Calculate the number of metadata blocks that need to be reserved in order
 * to allocate a new block at @lblock for a file that does not use extents.
 *
 * The calculation is cached per inode: when @lblock falls in the same chunk
 * of EXT4_ADDR_PER_BLOCK logical blocks as the previous call, the new block
 * shares the already-counted indirect path and no extra reservation is made.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
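
/*
 * Illustrative numbers (assuming 4 KiB blocks, i.e. 1024 pointers per block
 * and EXT4_ADDR_PER_BLOCK_BITS == 10, with a cold cache):
 *
 *	lblock = 5		-> 0  (still a direct block)
 *	lblock = 12		-> 1  (one new indirect block)
 *	lblock = 12 + 1024	-> 2  (double indirect + indirect)
 *	lblock = 12 + 2^20	-> 3  (triple + double + indirect)
 *
 * order_base_2(lblock) / EXT4_ADDR_PER_BLOCK_BITS is used as an upper-bound
 * estimate of the depth of the indirect path, so the result is the worst-case
 * number of new metadata blocks on the path to that block.
 */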

/*
 * Calculate the number of indirect blocks touched when mapping @nrblocks
 * logically contiguous blocks (used for journal credit estimates).
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block.
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
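
/*
 * Example (illustrative, 4 KiB blocks so 1024 pointers per indirect block):
 * mapping 3000 contiguous blocks touches ceil(3000/1024) = 3 leaf indirect
 * blocks, and in the worst case the range also crosses an extra indirect
 * block boundary and a double- and triple-indirect boundary, hence the fixed
 * "+ 4" above (one extra indirect, two double-indirect, one triple-indirect).
 */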

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  Returns 0
 * if we managed to create more room (or the journal is disabled).  If we
 * can't create more room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Returns 1 if all pointers in the range [p, q) are zero, 0 otherwise.
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the detached top of the branch
 *
 *	When truncating in the middle of a branch, the indirect blocks on the
 *	path to the new last block are "shared": everything before the cut
 *	point must survive and everything after it must go.  This function
 *	walks the path, finds the point where the branch to be removed forks
 *	off the part that stays, stores the block number of the removed
 *	subtree's root in *@top (0 if there is nothing to detach) and returns
 *	the element of @chain at which the caller should start freeing.
 *	Buffer heads held in the returned partial chain must be released by
 *	the caller.
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	for (p = partial;
	     (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p);
	     p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of the branch is all ours and does not grow immediately from the
	 * inode it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/**
 * ext4_clear_blocks - zero a number of block pointers and free the blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @bh:		buffer head holding the pointers (NULL for the inode itself)
 * @block_to_free: start of the contiguous run of blocks being freed
 * @count:	number of blocks in the run
 * @first:	first block pointer to clear
 * @last:	points immediately past the last block pointer to clear
 *
 * We release @count blocks on disk, but (@last - @first) may be greater than
 * @count because there can be holes in there.  If the journal is running low
 * on credits the transaction is restarted here, in which case the buffer and
 * inode are re-dirtied first and write access to @bh is re-taken afterwards.
 * Returns 0 on success, 1 if the run of blocks looked corrupt, or a negative
 * error code.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * block the truncate code for long if we journal the bitmaps, etc.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;	    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind/dind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind/dind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point.  However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared.  Check for this case and write out
		 * the extra buffer_head if needed.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}

/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * Report a read failure and skip this subtree
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been freed.  The
			 * indirect block itself is freed with
			 * EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET
			 * so that the journal forgets (and, if needed,
			 * revokes) any earlier copies of it.  Before doing
			 * that, make sure the handle still has credits left,
			 * restarting the transaction if necessary.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 * and clear the pointer.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree: free data blocks */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

/* Truncate a block-mapped (non-extent) inode down to i_size. */
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode.  We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Free the detached top of the removed subtree, if any */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}

/**
 *	ext4_ind_remove_space - remove space from the range
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@start:	First block to remove
 *	@end:	One block after the last block to remove (exclusive)
 *
 *	Free the blocks in the defined range (@end is the exclusive endpoint
 *	of the range).  This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We are removing a whole range of direct blocks */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels, so we have to free a
		 * partial branch at the start of the range and a partial
		 * branch at the end of the range.  Any whole subtrees in
		 * between are handled by the do_indirects label.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level; free everything
			 * to the end of that level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range.
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}

end_range:
		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free.  Everything was covered by the
				 * start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate.  But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range.
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free the detached top of the start branch, unless end shares it */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which points
		 * to the last element that should not be removed by truncate.
		 * But this is the end of the range in punch_hole, so we need
		 * to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block.  Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			return 0;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level.  So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge or end
		 * up at the same indirect block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	return 0;
}