// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/indirect.c
 *
 *  from
 *
 *  linux/fs/ext4/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *  (sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>
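
/*
 * A single step in the chain of indirect-block lookups: @p points at the
 * slot holding the block number (in the inode's i_data or inside an
 * indirect block), @key caches the value read from that slot, and @bh pins
 * the buffer the slot lives in (NULL when the slot is in the inode itself).
 */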
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
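
/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	Splits a logical block number into the sequence of array indices
 *	needed to walk from the inode's i_data through up to three levels of
 *	indirect blocks down to the data block.  Returns the depth of the
 *	resulting chain (1 for a direct block) or 0 if @i_block lies beyond
 *	what the indirect scheme can address.
 */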
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
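
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Fills the array of triples <key, p, bh> and returns %NULL if
 *	everything went OK, or a pointer to the last filled triple
 *	(incomplete one) otherwise.  If the chain ends early because a
 *	pointer in it is zero (a hole), *@err is 0; if reading or validating
 *	an indirect block fails, *@err carries the error instead.  Buffers
 *	pinned in @chain must be released with brelse() by the caller.
 */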
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);

		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
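
/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if the pointer will live in an indirect block - allocate near
 *	    that block.
 *	  + if the pointer will live in the inode - allocate in the same
 *	    cylinder group.
 *
 *	Returns: preferred place for a block (the goal).
 */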
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}
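
/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block
 *	allocation and returns it.
 */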
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	/* block-mapped files can only address 32-bit physical block numbers */
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
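
/**
 *	ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	Returns the number of direct (data) blocks that should be allocated
 *	for this request; the caller allocates @k indirect blocks on top.
 */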
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [dt]indirect blocks have not been allocated yet,
	 * so all the blocks on that path are clearly unallocated.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/*
	 * The indirect path exists; count the target block plus any
	 * immediately following slots that are still holes.
	 */
	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
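
/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@indirect_blks: number of allocated indirect blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates @ar->len data blocks and the @indirect_blks
 *	indirect blocks above them, links them into a chain and so prepares
 *	a branch that can later be spliced onto the inode.  On failure all
 *	freshly allocated blocks are freed again.  Returns 0 on success or
 *	a negative error.
 */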
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Clear so the error path can tell this level's
			 * buffer was never set up. */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, ar->inode->i_sb,
						     bh, EXT4_JTR_NONE);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];
		/* the last indirect block points at all the new data blocks */
		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Free data blocks */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks.  Buffer for new_blocks[i] is at branch[i+1].bh
		 * (buffer at branch[0].bh is the indirect block / inode
		 * already existing before ext4_alloc_branch() was called).
		 * Also because the blocks are freshly allocated, we don't
		 * need to revoke them, which is why we don't set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
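
/**
 *	ext4_splice_branch - splice the allocated branch onto inode.
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@where: location of missing link
 *	@num: number of indirect blocks we are adding
 *
 *	This function fills the missing link and does all the housekeeping
 *	needed in the inode (->i_blocks, etc.).  In case of success we end up
 *	with the full chain to the new block and return 0.
 */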
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, ar->inode->i_sb,
						    where->bh, EXT4_JTR_NONE);
		if (err)
			goto err_out;
	}

	/* That's it */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just
	 * allocated direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto an indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being
		 * spliced onto an indirect block at the very end of the
		 * file (the file is growing) then we *will* alter the inode
		 * to reflect the new i_size, but that is not done here -
		 * it is done when the inode is marked dirty on the write
		 * path.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
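
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching
 * anything to the tree: allocate the branch, set linkage between the
 * newborn blocks, and only then set the last missing link (that will
 * protect us from any truncate-generated removals - all blocks on the path
 * are immune now).  That has a nice additional property: no special
 * recovery from failed allocations is needed - we simply release blocks
 * and do not touch anything reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */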
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the hole under 'partial'.
		 * At each level we count the number of complete empty
		 * subtrees beyond the current offset and then descend into
		 * the subtree that is only partially beyond it.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of [dt]indirect blocks to allocate for this branch */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
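
/*
 * Calculate the number of metadata blocks that mapping @nrblocks logically
 * contiguous blocks can touch: with N contiguous data blocks, we need at
 * most N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, two
 * double-indirect blocks, and one triple-indirect block.
 */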
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
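
/*
 * Helper for ext4_ind_truncate_ensure_credits(): write out the pending
 * metadata buffer and the inode, then drop i_data_sem before the journal
 * transaction is restarted.  *@dropped tells the caller to re-take it.
 */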
static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has been already dropped and writes are blocked by
	 * i_rwsem.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}
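
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Make sure the handle has enough credits left for the truncate, extending
 * or restarting the transaction if necessary; across a restart, i_data_sem
 * is dropped and re-taken and write access to @bh is re-acquired.
 */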
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
						    EXT4_JTR_NONE);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}
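
/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever.  Returns 1 if the range [p, q)
 * contains only zero pointers.
 */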
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
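
/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode: inode in question
 *	@depth: depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain: place to store the pointers to partial indirect blocks
 *	@top: place to store the (detached) top of the branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive.  A block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block).  We have to free the top of that path along with
 *	everything to the right of the path.  This function finds the
 *	boundary: it returns the chain of partially-shared indirect blocks
 *	that must only be trimmed, and stores in *@top the pointer to the
 *	detached subtree that can be freed in its entirety (0 if there is
 *	no such subtree).
 */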
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);

	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all be covered by the path that we could have
	 * come through.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of
	 * our branch should be detached before unlocking.  However, if that
	 * rest of the branch is all ours and does not grow immediately from
	 * the inode, it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
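
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */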
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
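
/**
 *	ext4_free_data - free a list of data blocks
 *	@handle: handle for this transaction
 *	@inode: inode we are dealing with
 *	@this_bh: indirect buffer_head which contains *@first and *@last
 *	@first: array of block numbers
 *	@last: points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 *
 *	We accumulate contiguous runs of blocks to free.  Conveniently, if
 *	these blocks are contiguous then releasing them at one time will
 *	only affect one or two bitmap blocks (+ group descriptor(s) and
 *	superblock).
 */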
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;	    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    this_bh, EXT4_JTR_NONE);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point.  However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared.  Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
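
/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode: inode we are dealing with
 *	@parent_bh: buffer_head of the parent block (NULL for the inode itself)
 *	@first: array of block numbers
 *	@last: pointer immediately past the end of array
 *	@depth: depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */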
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = ext4_sb_bread(inode->i_sb, nr, 0);

			/*
			 * A read failure?  Report error and clear slot
			 * (should be rare).
			 */
			if (IS_ERR(bh)) {
				ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if the transaction for
			 * some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
							inode->i_sb, 1)) < 0)
				return;

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
						inode->i_sb, parent_bh,
						EXT4_JTR_NONE)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
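
/*
 * ext4_ind_truncate - truncate an inode using the indirect block scheme:
 * free everything past the block containing the new i_size, trimming
 * partially-used indirect blocks and releasing whole subtrees bottom-up.
 */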
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode.  We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}
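
/**
 *	ext4_ind_remove_space - remove space from the range
 *	@handle: JBD handle for this transaction
 *	@inode: inode we are dealing with
 *	@start: first block to remove
 *	@end: one block after the last block to remove (exclusive)
 *
 *	Free the blocks in the defined range (end is exclusive endpoint of
 *	range).  This is used by ext4_punch_hole().
 */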
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels, so we're going to
		 * free a partial block at start, and a partial block at the
		 * end of the range.  If there are some levels in between
		 * then we'll iterate branches to free them as well.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level; free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free.  Everything was covered by the
				 * start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate.  But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element that should not be removed by
		 * truncate.  But this is the end of the range in punch_hole,
		 * so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block.  Clear the
			 * range, then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level.  So
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}