// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/indirect.c
 *
 *  Block mapping, allocation and truncation paths for ext4 inodes that
 *  use the classic ext2/ext3-style indirect block scheme rather than
 *  extents.
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
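
/*
 * ext4_block_to_path - parse a logical block number into an array of offsets
 * @inode: inode in question (only its superblock geometry is used)
 * @i_block: logical block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: if non-NULL, set to the number of further logical blocks that
 *	can be mapped before another indirect block must be consulted
 *
 * Returns the depth of the resulting chain (1 for direct blocks, up to 4
 * for triple-indirect), or 0 if the block number is beyond what the
 * indirect scheme can address.
 */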
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
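
/*
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in the inode/indirect blocks
 * @chain: place to store the result
 * @err: where to put the error if a read fails
 *
 * Walks the chain described by @offsets, filling @chain[] with the block
 * numbers and buffer heads found on the way.  Returns NULL if the whole
 * chain could be read (the block is already mapped), or a pointer to the
 * last valid element of @chain if a pointer was zero or a read/verify
 * failure occurred (*err is set in the failure case).
 */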
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;

	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}

			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);

		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
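
/*
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of the indirect block
 *
 * Returns a preferred place (goal) for a new block: the most recently
 * allocated block in the same indirect block if there is one, otherwise
 * the indirect block itself, otherwise a goal derived from the inode's
 * own location.
 */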
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find a previous non-zero block pointer in this block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such pointer: fall back to the indirect block itself */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/* Otherwise pick a goal derived from the inode's location */
	return ext4_inode_to_goal_block(inode);
}
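
/*
 * ext4_find_goal - find a preferred place for allocation
 * @inode: owner
 * @block: logical block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns a preferred physical block for allocation, clamped to the
 * maximum block number addressable by non-extent files.
 */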
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
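
/*
 * ext4_blks_to_allocate - count the direct blocks to allocate for a branch
 * @branch: chain of indirect blocks
 * @k: number of indirect blocks that still have to be allocated
 * @blks: number of data blocks requested
 * @blocks_to_boundary: number of blocks before reaching the indirect
 *	block boundary
 *
 * Returns the number of data blocks to allocate for this branch, never
 * crossing an indirect block boundary.
 */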
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [d,t]indirect block(s) have not been allocated
	 * yet, so no block on that path has been allocated either.
	 */
	if (k > 0) {
		/* We don't handle allocation across a boundary here */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
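
/*
 * ext4_alloc_branch - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of indirect blocks to allocate
 * @offsets: offsets (within the blocks) at which to store the pointers
 * @branch: place to store the chain in
 *
 * Allocates @indirect_blks indirect blocks plus the requested data blocks,
 * links the indirect blocks together (the last one pointing at the data
 * blocks), journals the new metadata and fills @branch with the block
 * numbers and buffer heads of the new chain.  On failure every block that
 * was allocated is freed again; on success the chain is complete but not
 * yet spliced into the inode.
 */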
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* make sure the error path below sees no stale bh */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* the data blocks were allocated but never spliced in */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * Only freshly allocated indirect blocks need a forget.
		 * The buffer for new_blocks[i] is at branch[i+1].bh (the
		 * buffer at branch[0].bh is the pre-existing indirect
		 * block or the inode itself).
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}
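
/*
 * ext4_splice_branch - splice the allocated branch onto the inode
 * @handle: handle for this transaction
 * @ar: the allocation request that was just satisfied
 * @where: location of the missing link (filled in by ext4_get_branch)
 * @num: number of indirect blocks on the new branch
 *
 * Connects the chain built by ext4_alloc_branch() to the block tree by
 * storing the first new block number in the parent (inode or indirect
 * block), fills in any additional contiguous direct block pointers and
 * dirties the affected metadata.  On failure the freshly allocated blocks
 * are released again.
 */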
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If the new chain hangs off an indirect block we need write
	 * access to that block before modifying it.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* That's it: hook the new branch in */
	*where->p = where->key;

	/*
	 * If several contiguous direct blocks were allocated, fill in the
	 * remaining pointers right here as well.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* Done with the atomic part, now the housekeeping */
	if (where->bh) {
		/*
		 * The chain was spliced onto an indirect block: dirtying
		 * that block is enough, the inode itself was not changed
		 * here.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/* Spliced directly into the inode's i_data array */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * where[i].bh was freshly allocated above, so there is
		 * nothing on disk to revoke; a forget is all the journal
		 * needs.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
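
/*
 * ext4_ind_map_blocks - map logical blocks of an indirect-mapped inode
 * @handle: journal handle (may be NULL for pure lookups)
 * @inode: inode in question
 * @map: describes the requested logical range; on success m_pblk, m_len
 *	and m_flags are filled in
 * @flags: EXT4_GET_BLOCKS_* flags; EXT4_GET_BLOCKS_CREATE allows
 *	allocation of missing blocks
 *
 * Returns the number of mapped blocks (> 0), 0 if the range is a hole and
 * no allocation was requested, or a negative error.  A journal handle is
 * required whenever allocation may happen.
 */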
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* Map more blocks while they are contiguous on disk */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the hole spanned by the
		 * missing part of the branch, so the caller can skip it.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;

		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of an indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up the allocation request */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* The number of [d,t]indirect blocks that must be allocated */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look at the block map to count how many direct blocks
	 * should be allocated for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/* Allocate the missing indirect blocks and the data blocks */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/* Splice the freshly allocated branch onto the block tree */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Release the whole chain below */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
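
/*
 * ext4_ind_trans_blocks - number of journal blocks needed to map @nrblocks
 * contiguous logical blocks of an indirect-mapped inode (the indirect
 * blocks touched plus the double and triple indirect blocks).
 */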
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block.
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}

static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * The caller is about to restart the journal handle; drop any
	 * preallocations and release i_data_sem so we do not hold it
	 * across the restart.  *dropped tells the caller to re-acquire it.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}
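
/*
 * Make sure the current transaction has enough credits (and revoke
 * credits) to keep truncating an indirect-mapped inode.  If the handle
 * has to be restarted, i_data_sem is dropped and re-acquired and write
 * access to @bh (if any) is re-taken.  Returns 0 on success or a
 * negative error; the handle may have been restarted as a side effect.
 */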
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, bh);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}
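
/*
 * all_zeroes - check whether the block pointers in [p, q) are all zero
 */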
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
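
/*
 * ext4_find_shared - find the indirect blocks for partial truncation
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the missing top
 *
 * Locates the boundary between the part of the block tree that must be
 * preserved and the part that can be freed when truncating down to the
 * block described by @offsets.  Returns a pointer into @chain marking the
 * last element that must be (partially) preserved; *@top is set to the
 * block number of a whole subtree that the caller should detach and free
 * separately, or 0 if there is none.
 */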
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);

	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired a continuation since we looked at it,
	 * it should all survive and the (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	/* Walk back to the last block that must survive. */
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * The rest of the branch should be detached before unlocking.
	 * However, if that rest of the branch is all ours and does not
	 * grow immediately from the inode, it's easier to cheat and just
	 * decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
#if 0
		*p->p = 0;
#endif
	}

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
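
/*
 * ext4_clear_blocks - zero a range of block pointers and free the blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @bh: buffer head holding the pointers being cleared (NULL for the inode)
 * @block_to_free: starting physical block of the run being freed
 * @count: number of blocks in the run
 * @first: first pointer to clear
 * @last: pointer just past the last pointer to clear
 *
 * Returns 0 on success, 1 if the range fails validation, or a negative
 * error if the transaction could not be extended.
 */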
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
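
/*
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of the array
 *
 * Frees the blocks pointed to by [@first, @last), coalescing contiguous
 * runs so that each run is handed to ext4_clear_blocks() only once, and
 * then dirties @this_bh if one was supplied.
 */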
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* starting block # of a run */
	unsigned long count = 0;		/* number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* current block # */
	__le32 *p;				/* pointer into inode/ind
						   for the current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* If we can't update the pointers, we can't free the blocks */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal - ext4_clear_blocks() has already reported the error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at
		 * this point.  However, if the data is corrupted and an
		 * indirect block pointed to itself, it would have been
		 * detached when the block got cleared.  Check for this
		 * instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
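
/*
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of the array
 * @depth: depth of the branches to free
 *
 * Recursively frees the subtrees rooted at the pointers in [@first, @last):
 * at depth 0 the pointers refer to data blocks; otherwise each non-zero
 * pointer names an indirect block whose children are freed first, after
 * which the indirect block itself is released and the parent pointer is
 * cleared.
 */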
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = ext4_sb_bread(inode->i_sb, nr, 0);

			/*
			 * A read failure?  Report the error and skip the
			 * slot (should be rare).
			 */
			if (IS_ERR(bh)) {
				ext4_error_inode_block(inode, nr, -PTR_ERR(bh),
						       "Read failure");
				continue;
			}

			/* This zaps the entire block, bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been freed.
			 * Before freeing the indirect block itself, make
			 * sure the handle still has enough credits (the
			 * restart helper may temporarily drop i_data_sem).
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
							inode->i_sb, 1)) < 0)
				return;

			/*
			 * Now free the indirect block itself; it is no
			 * longer referenced from anywhere.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block we have just freed is pointed
				 * to by an indirect block: journal it.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
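
/*
 * ext4_ind_truncate - truncate an indirect-mapped inode down to i_size
 * @handle: JBD handle for this transaction
 * @inode: inode being truncated
 *
 * Frees all data and indirect blocks beyond the new i_size: first the
 * partially-shared branch located by ext4_find_shared(), then the whole
 * remaining indirect, double-indirect and triple-indirect subtrees.
 */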
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * Propagate the new size to the on-disk inode before we start
	 * freeing blocks.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * No data blocks need freeing if last_block is already at
		 * the indirect-addressing limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks only */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}
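
/*
 * ext4_ind_remove_space - remove space from a logical block range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: first logical block to remove
 * @end: one block past the last block to remove (exclusive)
 *
 * Frees the blocks in the half-open range [@start, @end) of an
 * indirect-mapped inode; used by the punch-hole path.  Returns 0.
 */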
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within the direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels of the tree: free
		 * everything from start to the end of its branch, then
		 * handle the branch that contains end.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level; free the
			 * remaining direct blocks.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range.
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive, so here we are
				 * at the start of the next level that we are
				 * not going to free.  Everything was covered
				 * by the start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate.  But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element instead.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range.
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* The punch happened within a single level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free the top of the branch, but only if partial2 isn't its subtree */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element that should not be removed by
		 * truncate.  But this is the end of the range in punch_hole,
		 * so we need to point to the next element instead.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block.  Clear the
			 * range, then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level, so
		 * only descend the side that is at or above the other one
		 * instead of always freeing both.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}