1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include "ext4_jbd2.h"
24#include "truncate.h"
25#include <linux/dax.h>
26#include <linux/uio.h>
27
28#include <trace/events/ext4.h>
29
30typedef struct {
31 __le32 *p;
32 __le32 key;
33 struct buffer_head *bh;
34} Indirect;
35
36static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
37{
38 p->key = *(p->p = v);
39 p->bh = bh;
40}
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @i_block is out of range
 * (too large) a warning is printed and zero returned.
 *
 * Note: this function doesn't find node addresses, so no IO is needed.
 * All we need to know is the capacity of indirect blocks (taken from
 * inode->i_sb).
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;	/* number of pointer slots in the last-level block */

	if (i_block < direct_blocks) {
		/* One of the direct slots in the inode. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect; i_block is now relative to that subtree. */
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect. */
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect. */
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		/* Block number beyond what the indirect scheme can map. */
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
/*
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon return chain[i].key contains the
 *	number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.
 *
 *	On failure *@err is set (-EIO for a read error, -ENOMEM when a
 *	buffer cannot be obtained) and the partially filled chain is
 *	returned.  The caller must brelse() the buffers in the filled
 *	entries when done with them.
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* First link lives in the inode's own i_data array; no buffer. */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references in the indirect block */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		/* Descend one level: slot lives in this indirect block. */
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);

		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
210{
211 struct ext4_inode_info *ei = EXT4_I(inode);
212 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
213 __le32 *p;
214
215
216 for (p = ind->p - 1; p >= start; p--) {
217 if (*p)
218 return le32_to_cpu(*p);
219 }
220
221
222 if (ind->bh)
223 return ind->bh->b_blocknr;
224
225
226
227
228
229 return ext4_inode_to_goal_block(inode);
230}
231
232
233
234
235
236
237
238
239
240
241
242
243static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
244 Indirect *partial)
245{
246 ext4_fsblk_t goal;
247
248
249
250
251
252 goal = ext4_find_near(inode, partial);
253 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
254 return goal;
255}
256
257
258
259
260
261
262
263
264
265
266
267
268
269static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
270 int blocks_to_boundary)
271{
272 unsigned int count = 0;
273
274
275
276
277
278 if (k > 0) {
279
280 if (blks < blocks_to_boundary + 1)
281 count += blks;
282 else
283 count += blocks_to_boundary + 1;
284 return count;
285 }
286
287 count++;
288 while (count < blks && count <= blocks_to_boundary &&
289 le32_to_cpu(*(branch[0].p + count)) == 0) {
290 count++;
291 }
292 return count;
293}
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
/*
 *	ext4_alloc_branch - allocate and set up a chain of blocks
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@indirect_blks: number of allocated indirect blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next
 *	@branch: place to store the chain in
 *
 *	This function allocates @indirect_blks indirect blocks plus the data
 *	blocks, links them into @branch (filling branch[i].key with the block
 *	numbers) and writes the pointer chain into the freshly zeroed
 *	indirect blocks.  The chain is NOT spliced into the tree here — the
 *	caller does that with ext4_splice_branch() once everything succeeded.
 *
 *	Returns 0 on success; on error all blocks allocated so far are freed
 *	again and the error code is returned.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			/* Last round: allocate the data block(s) proper. */
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			/* Metadata block; its number seeds the next goal. */
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			/* new_blocks[i] was not allocated; skip it on rollback. */
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		/* Buffer for the indirect block allocated in the previous round. */
		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		/* Zero the new indirect block and store the downstream pointer(s). */
		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		/* The leaf gets the whole run of ar->len contiguous data blocks. */
		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks.  The buffer for new_blocks[i-1] is at branch[i].bh,
		 * and the buffer at branch[0].bh is the indirect block /
		 * inode that already existed before ext4_alloc_branch() was
		 * called.
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
/*
 *	ext4_splice_branch - splice the allocated branch onto inode
 *	@handle: handle for this transaction
 *	@ar: structure describing the allocation request
 *	@where: location of the missing link
 *	@num: number of indirect blocks we are adding
 *
 *	This function fills the missing link and does all the housekeeping
 *	needed in the inode (or, for an indirect-block splice, in the parent
 *	indirect block).  On error it frees the just-allocated branch again.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into an indirect block (as opposed to the
	 * inode) then we need to get write access to the indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* That's it: fill in the missing link. */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just
	 * allocated direct blocks: with no new indirect blocks (num == 0)
	 * the whole contiguous run is stored right here.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping. */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being
		 * spliced onto an indirect block at the very end of the
		 * file (the file is growing) then we *will* alter the
		 * inode to reflect the new i_size — but that is done by
		 * the generic write path marking the inode dirty, not
		 * here.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/* OK, we spliced it into the inode itself on a direct block. */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no need to
		 * revoke the block, which is why we don't need to set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	/* Also free the just-allocated data block run. */
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
515 struct ext4_map_blocks *map,
516 int flags)
517{
518 struct ext4_allocation_request ar;
519 int err = -EIO;
520 ext4_lblk_t offsets[4];
521 Indirect chain[4];
522 Indirect *partial;
523 int indirect_blks;
524 int blocks_to_boundary = 0;
525 int depth;
526 int count = 0;
527 ext4_fsblk_t first_block = 0;
528
529 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
530 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
531 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
532 depth = ext4_block_to_path(inode, map->m_lblk, offsets,
533 &blocks_to_boundary);
534
535 if (depth == 0)
536 goto out;
537
538 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
539
540
541 if (!partial) {
542 first_block = le32_to_cpu(chain[depth - 1].key);
543 count++;
544
545 while (count < map->m_len && count <= blocks_to_boundary) {
546 ext4_fsblk_t blk;
547
548 blk = le32_to_cpu(*(chain[depth-1].p + count));
549
550 if (blk == first_block + count)
551 count++;
552 else
553 break;
554 }
555 goto got_it;
556 }
557
558
559 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
560 unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
561 int i;
562
563
564 count = 1;
565 for (i = 0; partial + i != chain + depth - 1; i++)
566 count *= epb;
567
568 map->m_pblk = 0;
569 map->m_len = min_t(unsigned int, map->m_len, count);
570 goto cleanup;
571 }
572
573
574 if (err == -EIO)
575 goto cleanup;
576
577
578
579
580 if (ext4_has_feature_bigalloc(inode->i_sb)) {
581 EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
582 "non-extent mapped inodes with bigalloc");
583 return -EFSCORRUPTED;
584 }
585
586
587 memset(&ar, 0, sizeof(ar));
588 ar.inode = inode;
589 ar.logical = map->m_lblk;
590 if (S_ISREG(inode->i_mode))
591 ar.flags = EXT4_MB_HINT_DATA;
592 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
593 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
594 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
595 ar.flags |= EXT4_MB_USE_RESERVED;
596
597 ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
598
599
600 indirect_blks = (chain + depth) - partial - 1;
601
602
603
604
605
606 ar.len = ext4_blks_to_allocate(partial, indirect_blks,
607 map->m_len, blocks_to_boundary);
608
609
610
611
612 err = ext4_alloc_branch(handle, &ar, indirect_blks,
613 offsets + (partial - chain), partial);
614
615
616
617
618
619
620
621
622 if (!err)
623 err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
624 if (err)
625 goto cleanup;
626
627 map->m_flags |= EXT4_MAP_NEW;
628
629 ext4_update_inode_fsync_trans(handle, inode, 1);
630 count = ar.len;
631got_it:
632 map->m_flags |= EXT4_MAP_MAPPED;
633 map->m_pblk = le32_to_cpu(chain[depth-1].key);
634 map->m_len = count;
635 if (count > blocks_to_boundary)
636 map->m_flags |= EXT4_MAP_BOUNDARY;
637 err = count;
638
639 partial = chain + depth - 1;
640cleanup:
641 while (partial > chain) {
642 BUFFER_TRACE(partial->bh, "call brelse");
643 brelse(partial->bh);
644 partial--;
645 }
646out:
647 trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
648 return err;
649}
650
651
652
653
654
655
656
657
658
659
660
661
/*
 * Direct I/O for indirect-block-mapped (non-extent) inodes.
 *
 * If the O_DIRECT write will extend the file then we add the inode to the
 * orphan list first, so that if the machine crashes during the write we
 * get at most a partially-written file, never stale exposed blocks past
 * i_size.
 */
ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			   loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_iter_count(iter);
	int retries = 0;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
		/*
		 * Nolock dioread: signal the in-flight DIO (via
		 * inode_dio_begin) before checking the DIOREAD_LOCK state,
		 * so we don't race with a concurrent switch to locked mode;
		 * the smp_mb() orders the two against each other.
		 */
		inode_dio_begin(inode);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						    EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_end(inode);
			goto locked;
		}
		if (IS_DAX(inode))
			ret = dax_do_io(iocb, inode, iter, offset,
					ext4_dio_get_block, NULL, 0);
		else
			ret = __blockdev_direct_IO(iocb, inode,
						   inode->i_sb->s_bdev, iter,
						   offset, ext4_dio_get_block,
						   NULL, NULL, 0);
		inode_dio_end(inode);
	} else {
locked:
		if (IS_DAX(inode))
			ret = dax_do_io(iocb, inode, iter, offset,
					ext4_dio_get_block, NULL, DIO_LOCKING);
		else
			ret = blockdev_direct_IO(iocb, inode, iter, offset,
						 ext4_dio_get_block);

		/* A failed extending write may have instantiated blocks. */
		if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + count;

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/*
			 * This is really bad luck.  We've written the data
			 * but cannot extend i_size.  Drop the orphan entry
			 * (best effort) and report the error.
			 */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so
				 * there's no way of reporting error returns
				 * from ext4_mark_inode_dirty() to userspace.
				 * So ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
777
778
779
780
781
/*
 * Calculate the number of metadata blocks that need to be reserved in
 * order to allocate a new block at @lblock for a non-extent-mapped file.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	/* Mask that groups lblocks sharing the same last-level indirect block. */
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	/* Direct blocks need no indirect metadata at all. */
	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	/*
	 * If this block falls in the same masked group as the last block we
	 * calculated for, the needed metadata was already counted; just
	 * extend the cached run length.
	 */
	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	/* One metadata block per indirection level this lblock requires. */
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
803
804
805
806
807
808int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
809{
810
811
812
813
814
815 return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
816}
817
818
819
820
821
822
823
824
825
826
827
828
829
830static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
831{
832 if (!ext4_handle_valid(handle))
833 return 0;
834 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
835 return 0;
836 if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
837 return 0;
838 return 1;
839}
840
841
842
843
844
845
846static inline int all_zeroes(__le32 *p, __le32 *q)
847{
848 while (p < q)
849 if (*p++)
850 return 0;
851 return 1;
852}
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	Helper for truncate/punch-hole.  Walks the chain to the point from
 *	which everything below must be freed, and either detaches the top of
 *	the doomed subtree (storing its key in *@top) or, when the doomed
 *	part grows directly out of the surviving chain, just backs
 *	partial->p up by one so the caller frees from there.  Returns the
 *	boundary triple; buffers above it in the chain have been released.
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	/* Back up past indirect blocks that are entirely zero below the cut. */
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of
	 * our branch should be detached before unlocking.  However, if that
	 * rest of branch is all ours and does not grow immediately from the
	 * inode it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}

	/* Release the buffers we no longer need above the boundary. */
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
937
938
939
940
941
942
943
944
945
946
947
948
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	/* Directory/symlink blocks are metadata; journalled data needs forget. */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		/* Flush dirty state before restarting the transaction. */
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
						  ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		/* The restart dropped our write access; take it again. */
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	/* Clear all pointers in [first, last), holes included. */
	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/**
 *	ext4_free_data - free a list of data blocks
 *	@handle:	handle for this transaction
 *	@inode:		inode we are dealing with
 *	@this_bh:	indirect buffer_head which contains *@first and *@last
 *	@first:		array of block numbers
 *	@last:		points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit).  We accumulate contiguous runs of
 *	blocks so that contiguous ranges are released with a single
 *	ext4_free_blocks() call.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: flush it, start a new one. */
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final pending run, if any. */
	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error — already reported via ext4_std_error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at
		 * this point.  However, if the data is corrupted and an
		 * indirect block pointed to itself, it would have been
		 * detached when the block got cleared.  Check for this
		 * instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit).  Recurses depth-first; at depth 0
 *	the pointers are data blocks and are handed to ext4_free_data().
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure?  Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has now been freed.
			 * Before freeing this indirect block itself, make
			 * sure the handle has credits left, restarting the
			 * transaction if needed.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if we are
			 * journaling (and not doing data journaling), we
			 * have to make sure a revoke record is written for
			 * this metadata block, since it may get reused.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal
				 * the pointer clear.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree: free data blocks. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
1219
/*
 * ext4_ind_truncate - truncate an indirect-block-mapped inode to i_size.
 * @handle: JBD handle for this transaction
 * @inode:  inode being truncated
 *
 * Frees every block past i_size: first the tail of the partially-kept
 * branch, then the remaining whole subtrees (single/double/triple
 * indirect) via the fall-through switch at the end.
 */
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	/* First block to keep (rounded up), and the block-map size limit. */
	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	/* Drop cached extent-status entries for the truncated range. */
	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode.  We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * Nothing mappable past the limit, so there are no data
		 * blocks to free beyond it.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the shared branch (already detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
/**
 *	ext4_ind_remove_space - remove space from the range
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@start:	First block to remove
 *	@end:	One block after the last block to remove (exclusive)
 *
 *	Free the blocks in the defined range (end is exclusive endpoint of
 *	range).  Used for hole punching on indirect-mapped files: two
 *	partial branches (one at @start, one at @end) are found with
 *	ext4_find_shared() and the subtrees between them are freed.
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	/* Clamp the range to what the block map can address. */
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are on different levels, so we're going to
		 * free a partial branch at start and a partial branch at
		 * the end of the range; whole subtrees in between are
		 * handled by the do_indirects label.
		 */

		if (n == 1) {
			/*
			 * Start is at the direct block level; free
			 * everything to the end of that level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}


		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range.
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}

end_range:
		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're
				 * at the start of the next level we're not
				 * going to free.  Everything was covered by
				 * the start of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure
			 * which points to the last element that should not
			 * be removed by truncate.  But this is the end of
			 * the range in punch_hole, so we need to point to
			 * the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the beginnings of indirect blocks on the shared
		 * branch at the end of the range.
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		/* The branches share a prefix iff the offsets match down to 'level'. */
		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element that should not be removed by
		 * truncate.  But this is the end of the range in punch_hole,
		 * so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block.  Clear the
			 * range, then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			return 0;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level.
		 * So, we give them a chance to arrive at the same level,
		 * then walk them in step with each other until we converge
		 * on the same block.
		 */
		if (partial > chain && depth <= depth2) {
			/* Free the tail of the start-side indirect block. */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			/* Free the head of the end-side indirect block. */
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	return 0;
}
1576