/*
 *  linux/fs/ext4/indirect.c
 *
 *  Block mapping, direct I/O and truncate paths for indirect-mapped
 *  (non-extent) ext4 inodes.
 */
#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>

#include <trace/events/ext4.h>
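
/*
 * One step of the lookup path through the indirect tree: @p points at the
 * block-number slot (in the inode's i_data or inside an indirect block),
 * @key caches that slot's little-endian value, and @bh pins the buffer the
 * slot lives in (NULL when the slot is in the inode itself).
 */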
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
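
/*
 * ext4_block_to_path - parse the block number into an array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: logical block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block
 *
 * Translates a logical block number into the chain of offsets to follow
 * through the inode's direct, indirect, double-indirect and triple-indirect
 * arrays.  Returns the depth of the chain (1 for a direct block), or 0 if
 * @i_block is beyond the maximum representable offset.
 *
 * For example, logical block 12 (the first block past the EXT4_NDIR_BLOCKS
 * direct slots) yields depth 2 with offsets {EXT4_IND_BLOCK, 0}.
 */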
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
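
/*
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Fills @chain with the buffers holding each level of the lookup path.
 * Returns NULL when the whole chain is present and the block is mapped;
 * otherwise it returns a pointer to the last filled element of @chain:
 * either the element whose slot is zero (a hole), or, with *@err set,
 * the element past the last one we could read.
 */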
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;

	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
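
/*
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * Returns the preferred place for a block (the goal).
 */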
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}
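
/*
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation,
 * returns it.
 */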
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
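
/*
 * ext4_blks_to_allocate - look up the block map and count the number of
 * direct blocks that need to be allocated for the given branch.
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Returns the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */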
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it's clear that blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
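
/*
 * ext4_alloc_branch - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to the next level
 * @branch: place to store the chain in
 *
 * This function allocates blocks, zeroes out all but the last indirect
 * block, links the blocks into a chain and writes them to disk.  The chain
 * points at the just-allocated data blocks but is not yet spliced into the
 * inode's tree; that is the caller's job via ext4_splice_branch().  If the
 * allocation fails partway, every block allocated so far is freed before
 * the error is returned.
 */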
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to forget allocated (but not
		 * used) indirect blocks
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}
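
/*
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * the inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */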
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done via
		 * the mark_inode_dirty() path once the write commits.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
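
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., inodes using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if the check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.  That has a nice additional property: no
 * special recovery from the failed allocations is needed - we simply release
 * blocks and do not touch anything reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */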
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks remaining after the current
		 * position in each level's subtree; this bounds the size
		 * of the hole we found.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EUCLEAN;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
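
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 */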
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	/* DAX inodes are handled elsewhere; reaching this path is a bug */
	if (WARN_ON_ONCE(IS_DAX(inode)))
		return 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ext4_update_i_disksize(inode, inode->i_size);
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		/*
		 * Unlocked read: bump the dio counter first, then recheck
		 * the DIOREAD_LOCK state flag under the memory barrier and
		 * fall back to the locked path if it has been set.
		 */
		inode_dio_begin(inode);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						    EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_end(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
		inode_dio_end(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			if (!ret)
				ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size || end > ei->i_disksize) {
				ext4_update_i_disksize(inode, end);
				if (end > inode->i_size)
					i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
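
/*
 * Calculate the number of metadata blocks that need to be reserved to
 * allocate a new block at @lblock for a non-extent-based file.
 */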
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
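
/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.  E.g., with 4KiB blocks (1024 pointers per
 * block), mapping 1024 contiguous blocks needs at most 1 + 4 = 5 blocks.
 */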
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
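
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  If
 * extending fails, the caller must restart the transaction in the
 * top-level truncate loop.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */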
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}
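
/*
 * Returns 1 if all of the pointers in [p, q) are zero, 0 otherwise.
 */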
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
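
/*
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to the partial indirect blocks
 * @top: place to put the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several indirect
 * blocks but leave the blocks themselves alive.  We walk the chain to the
 * boundary and find the last indirect block that must survive; everything
 * hanging below it is detached: the block number of the top of the removed
 * branch goes into *@top so truncate can free it without racing with
 * readers.  If the subtree to be removed is entirely ours and does not
 * grow directly from the inode, we "cheat" by decrementing partial->p
 * instead of detaching anything.  Returns a pointer into @chain at the
 * last element that must be partially cleared.
 */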
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
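
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */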
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
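
/*
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * block the truncate anyway: we can't free the blocks in the transaction
 * that processed the data.
 */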
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
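
/*
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */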
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * can tidy up after us.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					    "call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
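
/*
 * ext4_ind_truncate - truncate an indirect-mapped inode to i_size
 *
 * Called from ext4_truncate() with a started journal handle.  Frees all
 * data and indirect blocks beyond the new end of file and clears the
 * freed pointers in the inode.
 */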
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}
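
/*
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: First block to remove
 * @end: One block after the last block to remove (exclusive)
 *
 * Free the blocks in the defined range (end is exclusive endpoint of
 * range). This is used by ext4_punch_hole().
 */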
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on a different levels so we're going to
		 * free partial block at start, and partial block at end of
		 * the range. If there were some blocks in between them, they
		 * will be freed as well.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free. Everything was covered by the start
				 * of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns Indirect structure which
			 * points to the last element which should not be
			 * removed by truncate. But this is end of the range
			 * in punch_hole so we need to point to the next element
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns Indirect structure which
		 * points to the last element which should not be
		 * removed by truncate. But this is end of the range
		 * in punch_hole so we need to point to the next element
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}