/*
 * linux/fs/ext4/indirect.c
 *
 * Handling of indirect-mapped (non-extent) inodes: mapping logical
 * blocks through the classic direct/indirect/double/triple block
 * tree, allocating new branches, and freeing them again at truncate
 * or hole-punch time.
 */
#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure
 *	common for UNIX filesystems - tree of pointers anchored in the inode,
 *	with data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @i_block is out of range
 *	(too large), a warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
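
/*
 * Worked example (illustrative only, assuming a 4 KiB block size, so
 * EXT4_ADDR_PER_BLOCK == 1024 and ptrs_bits == 10):
 *
 *	i_block = 5    -> offsets = { 5 }, depth 1 (direct block)
 *	i_block = 5000 -> 5000 - 12 = 4988 >= 1024; 4988 - 1024 = 3964,
 *			  which is < 1024^2, so:
 *			  offsets = { EXT4_DIND_BLOCK, 3964 >> 10 = 3,
 *				      3964 & 1023 = 892 }, depth 3, and
 *			  *boundary = 1024 - 1 - 892 = 131, i.e. 131 more
 *			  blocks fit in the same last-level indirect block.
 */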

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and %NULL for i==0.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *@err == 0).
 *
 *	Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
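
/*
 * Example of the partial-chain contract (illustrative): for a depth-3
 * lookup where the inode's EXT4_DIND_BLOCK slot is set but the slot
 * inside the double-indirect block is zero (a hole), the walk stops
 * early and returns &chain[1] with *err == 0; chain[1].p then points at
 * the zero slot that a subsequent allocation would have to fill.
 */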

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if the pointer will live in an indirect block - allocate near
 *	    that block.
 *	  + if the pointer will live in the inode - allocate in the same
 *	    cylinder group.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block
 *	allocation and returns it. Because this is only used for
 *	non-extent files, we limit the block number to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *	@branch: chain of indirect blocks
 *	@k: number of blocks that still need allocating for indirect levels
 *	@blks: number of data blocks to be mapped
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	Return the total number of direct blocks to allocate, capped at
 *	the end of the current last-level indirect block.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it is clear that no blocks on that path are allocated.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
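
/*
 * Illustrative numbers: with k == 0 (the final indirect block already
 * exists), blks == 16 and blocks_to_boundary == 7, the loop can extend
 * the run at most to the indirect-block boundary, so 8 blocks are
 * requested (the target plus the 7 following slots, assuming those
 * slots are zero). With k > 0 the same request also yields
 * min(16, 7 + 1) == 8, since we never allocate across the boundary.
 */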

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@indirect_blks: number of allocated indirect blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next
 *	@branch: place to store the chain in
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into a chain and writes them if sync is required. In
 *	other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it
 *	after we had read the existing part of the chain and partial points
 *	to the last triple of that (one with zero ->key). Upon exit we have
 *	the same picture as after a successful ext4_get_block(), except
 *	that in one place the chain is disconnected - *branch->p is still
 *	zero (we did not set the last link), but branch->key contains the
 *	number that should be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	allocation (normally -ENOSPC). Otherwise we set the chain as
 *	described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. The buffer for new_blocks[i-1] is at branch[i].bh,
		 * and the buffer at branch[0].bh is the indirect block (or
		 * inode) that already existed before ext4_alloc_branch()
		 * was called.
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}
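
/*
 * Shape of the result (illustrative): with indirect_blks == 2 and
 * ar->len == 4 (e.g. a whole double-indirect branch missing from the
 * inode), new_blocks[] holds { top indirect block, leaf indirect block,
 * first of 4 contiguous data blocks }. Slot offsets[1] of the top block
 * points at the leaf block, slots offsets[2]..offsets[2]+3 of the leaf
 * block point at the data blocks, and branch[0].key (not yet written to
 * *branch[0].p) is what ext4_splice_branch() will use to attach the
 * whole branch.
 */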

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed
 * in inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the
	 * just-allocated direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being
		 * spliced onto an indirect block at the very end of the
		 * file (the file is growing) then we *will* alter the
		 * inode to reflect the new i_size.  But that is not done
		 * here - it is done in the generic write path when the
		 * inode is marked dirty.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * ext4_ind_map_blocks() should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks while they are contiguous */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/* Count the number of blocks in the subtree under this
		 * partial block */
		count = 1;
		for (i = 0; partial + i != chain + depth - 1; i++)
			count *= epb;
		/* Fill in the size of the hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EFSCORRUPTED;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
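
/*
 * Caller's-eye sketch (hypothetical values, not a real call site):
 *
 *	struct ext4_map_blocks map = { .m_lblk = 5000, .m_len = 8 };
 *	int ret = ext4_ind_map_blocks(handle, inode, &map,
 *				      EXT4_GET_BLOCKS_CREATE);
 *
 * On success ret is the number of blocks mapped (1..8 here), map.m_pblk
 * holds the first physical block, and EXT4_MAP_NEW is set in map.m_flags
 * if an allocation was performed. Without EXT4_GET_BLOCKS_CREATE,
 * ret == 0 reports a hole (with map.m_len trimmed to the hole size);
 * ret < 0 is an error.
 */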

/*
 * Calculate the number of metadata blocks that need to be reserved in
 * order to allocate a new block at @lblock in a non-extent
 * (indirect-mapped) file.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
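
/*
 * Worked example (illustrative, 4 KiB blocks so EXT4_ADDR_PER_BLOCK_BITS
 * is 10): for lblock == 5000 we get 5000 - 12 = 4988, order_base_2(4988)
 * is 13, and 13 / 10 + 1 == 2, i.e. reserve two metadata blocks (one
 * indirect plus one double-indirect). Later allocations landing in the
 * same 1024-block window (same lblock & dind_mask) return 0, since the
 * reservation above already covers them.
 */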

/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
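
/*
 * Example (illustrative, 4 KiB blocks so EXT4_ADDR_PER_BLOCK == 1024):
 * mapping 3000 contiguous blocks gives DIV_ROUND_UP(3000, 1024) == 3,
 * so the estimate is 3 + 4 == 7 metadata blocks touched.
 */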

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  If
 * extending fails, the caller restarts the transaction in the top-level
 * truncate loop.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever is better for the architecture.
 *
 * Returns 1 if the range [p, q) of block pointers contains only zeroes.
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to put the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch
 *	may require special attention - pageout below the truncation point
 *	may need to go through all the indirect blocks and we do not
 *	want to race with it.
 *
 *	The function returns a pointer into @chain to the triple holding
 *	the boundary: everything hanging to the right of that pointer is
 *	ours to free. If the whole branch can go, *@top is set to the
 *	block number of its (detached) top, which the caller then frees.
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
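
/*
 * Example of the trailing-zero trimming above (illustrative): truncating
 * at a block whose path is { EXT4_DIND_BLOCK, 5, 0 } gives k == 2, i.e.
 * nothing inside the level-3 leaf indirect block needs to survive, so
 * the branch hanging off slot 5 of the double-indirect block is freed
 * whole and only the double-indirect block itself is partially cleaned.
 */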

/**
 * ext4_clear_blocks - Zero a number of block pointers (and free the blocks)
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @bh:		buffer_head which contains *@first and *@last (NULL if the
 *		pointers live in the inode itself)
 * @block_to_free: start of a range of blocks to free
 * @count:	number of blocks to free
 * @first:	array of block pointers
 * @last:	points immediately past the end of array
 *
 * Clears the pointers in [@first, @last) and frees @count blocks starting
 * at @block_to_free, extending the running transaction if it is about to
 * run out of credits. Returns 0 on success, 1 on an invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if
 * these blocks are contiguous then releasing them at one time will only
 * affect one or two bitmap blocks (+ group descriptor(s) and superblock)
 * and we won't block truncate/write for long.  Bad news if the
 * filesystem is fragmented.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;	    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
						        block_to_free, count,
						        block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at
		 * this point. However, if the data is corrupted and an
		 * indirect block pointed to itself, it would have been
		 * detached when the block was cleared. Check for this
		 * instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}

/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been released.
			 * Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extending the
			 * transaction for some reason fails to put the
			 * bitmap changes and the release into the same
			 * transaction, recovery will merely complain about
			 * releasing a free block, rather than leaking
			 * blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if we are
			 * journaling (and not doing data journaling), we
			 * have to make sure a revoke record is written to
			 * prevent journal replay from overwriting the
			 * (former) indirect block if it gets reallocated as
			 * a data block.  This must happen in the same
			 * transaction where the data blocks are actually
			 * freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode. We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}

/**
 *	ext4_ind_remove_space - remove space from the range
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@start:	First block to remove
 *	@end:	One block after the last block to remove (exclusive)
 *
 *	Free the blocks in the defined range (end is exclusive endpoint of
 *	range). This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within the direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels, so we're going to
		 * free the partial branch at start, and the partial branch
		 * at the end of the range. If there are some levels in
		 * between then the do_indirects label will take care of
		 * them.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}

end_range:
		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive, so here we're
				 * at the start of the next level that we're
				 * not going to free. Everything was covered
				 * by the start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element which should not
			 * be removed by truncate. But this is the end of the
			 * range in punch_hole, so we need to point to the
			 * next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element which should not be removed
		 * by truncate. But this is the end of the range in
		 * punch_hole, so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			return 0;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	return 0;
}