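/*
 * Indirect block handling for ext4: block-map lookup, branch allocation,
 * direct I/O and truncation for inodes that use the traditional
 * (non-extent) indirect addressing scheme.
 */
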
#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
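
/*
 * ext4_block_to_path - parse the logical block number into an array of offsets
 * @inode: inode in question (only its superblock is of interest)
 * @i_block: logical block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set non-zero if the referred-to block is likely to be followed
 *	(on disk) by an indirect block
 *
 * ext4 stores the locations of a non-extent file's data in a tree of block
 * pointers anchored in the inode, with data blocks at the leaves and
 * indirect blocks in the interior nodes.  This function translates the
 * logical block number into a path in that tree: the return value is the
 * path depth and offsets[n] is the offset of the pointer to the (n+1)th
 * node within the nth one.  If the block number is out of range, a warning
 * is printed and 0 is returned.
 */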
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
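
/*
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Fills the array of <key, p, bh> triples describing the branch and returns
 * %NULL if everything went fine, or a pointer to the last filled (incomplete)
 * triple otherwise: either the branch simply ends early (the key in the
 * returned triple is zero and *@err is 0), or an error occurred while
 * reading or validating an indirect block (*@err is set).
 */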
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
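
/*
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near it.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 */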
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find a previous (non-hole) block in the same indirect block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try the location of the indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself?  OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}
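
/*
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns the preferred place for block allocation.  Since this is only
 * used for non-extent files, the result is limited to 32 bits.
 */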
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
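
/*
 * ext4_blks_to_allocate - look up the block map and count the number of
 *	direct blocks that need to be allocated for the given branch.
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Returns the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */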
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so every block on that path is clearly unallocated.
	 */
	if (k > 0) {
		/* right now we don't handle cross-boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
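
/*
 * ext4_alloc_branch - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @inode: owner
 * @iblock: logical block of the file that is being mapped
 * @indirect_blks: number of indirect blocks to allocate
 * @blks: on entry the number of data blocks wanted; on exit the number
 *	actually allocated
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) of the pointers to the next level
 * @branch: place to store the chain in
 *
 * This function allocates @indirect_blks indirect blocks plus up to @blks
 * data blocks, zeroes out the new indirect blocks, links them into a chain
 * and hands the metadata to the journal.  In other words, it prepares a
 * branch that can later be spliced onto the inode by ext4_splice_branch();
 * until then nothing allocated here is reachable from the inode.  On
 * failure all newly allocated blocks are freed and an error is returned.
 */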
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	struct ext4_allocation_request	ar;
	struct buffer_head		*bh;
	ext4_fsblk_t			b, new_blocks[4];
	__le32				*p;
	int				i, j, err, len = 1;

	/*
	 * Set up the allocation request for the data blocks.
	 */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.len = *blks;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			ar.goal = goal;
			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
		} else
			goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
							goal, 0, NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar.len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = ar.len;
	return 0;
failed:
	for (; i >= 0; i--) {
		if (i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar.len : 1, 0);
	}
	return err;
}
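
/*
 * ext4_splice_branch - splice the allocated branch onto the inode
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of the block we are adding
 * @where: location of the missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all the housekeeping needed
 * in the inode.  On success we end up with a full chain to the new block and
 * return 0; on failure the freshly allocated blocks are freed again.
 */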
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* That's it */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point at the additional
	 * just-allocated direct blocks.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	if (where->bh) {
		/*
		 * We spliced it onto an indirect block, so the inode itself
		 * has not been altered here; if the file is growing, the new
		 * i_size is recorded when the caller marks the inode dirty.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
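
/*
 * The ext4_ind_map_blocks() function handles non-extent inodes (i.e., inodes
 * using the traditional indirect/double-indirect addressing scheme) for
 * ext4_map_blocks().
 *
 * Allocation strategy: if we have to allocate anything, we allocate the
 * whole branch (indirect blocks plus data blocks) before attaching it to
 * the tree, link the new blocks together, and only then set the last
 * missing link.  On failure we simply release the new blocks and never
 * touch anything already reachable from the inode, so no special recovery
 * is needed.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0: number of blocks mapped or allocated.
 * return = 0: plain lookup failed.
 * return < 0: error.
 */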
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks while they are physically contiguous */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed or read error */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -ENOSPC;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);

	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
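
/*
 * Direct I/O for inodes that use the indirect block map (non-extent files).
 *
 * If an O_DIRECT write will extend the file, the inode is put on the orphan
 * list first, so that recovery can truncate it back to its original size if
 * the machine crashes in the middle of the write.
 */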
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		/*
		 * Unlocked direct reads may be disabled dynamically via
		 * ext4_inode_block_unlocked_dio().  Check the inode state
		 * while holding an extra i_dio_count reference.
		 */
		atomic_inc(&inode->i_dio_count);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						   EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_done(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
		inode_dio_done(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/*
			 * We may have written the data but cannot extend
			 * i_size; bail out and pretend the write failed.
			 */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're returning a positive ret for the
				 * non-zero-length I/O, so errors from
				 * ext4_mark_inode_dirty() cannot be reported
				 * to userspace; ignore them.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
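
/*
 * Calculate the number of metadata blocks that need to be reserved in order
 * to allocate a new block at @lblock of a non-extent file.
 */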
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
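
/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */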
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block.
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
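
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room (or already had enough).
 * If we can't create more room, and the transaction must be restarted,
 * we return 1.
 */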
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}
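
/*
 * Returns 1 if all the block pointers in the range [p, q) are zero,
 * 0 otherwise.
 */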
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
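
/*
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to the partial indirect blocks
 * @top: place to put the top of the detached subtree
 *
 * Truncation may have to clear the tail ends of several indirect blocks but
 * leave the blocks themselves in place, while everything past the truncation
 * point must be freed.  This helper walks the branch leading to the
 * truncation point, decides how much of it can be detached, stores in *@top
 * the pointer value at the top of the subtree that the caller should free
 * (0 if nothing needs detaching), and returns a pointer to the last filled
 * triple of @chain.
 */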
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);

	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of branch is all ours and does not grow immediately from the inode,
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Don't clear the pointer here: ext4 must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
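
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Returns 0 on success, 1 on an invalid block range, and a negative error
 * code on a fatal journalling failure.
 */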
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
						  ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
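
/*
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * Contiguous runs of blocks are accumulated and freed in one call, so a run
 * typically touches only one or two bitmap blocks (plus group descriptor(s)
 * and superblock) and does not bloat the truncate transaction.
 */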
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;	    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at
		 * this point.  However, if the data is corrupted and an
		 * indirect block pointed to itself, it would have been
		 * detached when the block was cleared.  Check for this
		 * instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
					 (unsigned long long) this_bh->b_blocknr);
	}
}
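
/*
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */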
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure?  Report the error and skip the
			 * slot (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been released.
			 * Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the update of the bitmap
			 * block which owns it, so make some room in the
			 * journal first.
			 *
			 * The parent pointer is zeroed *after* the block is
			 * freed in the bitmaps, so if the transaction has to
			 * be split, replay will at worst complain about
			 * freeing a free block rather than leak blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * Free the indirect block itself and make sure the
			 * journal forgets/revokes it as metadata.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode.  We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}
}

/*
 * Helper for ext4_free_hole_blocks(): walk one level of the indirect tree,
 * free the data blocks that fall inside the punched range and recurse into
 * lower levels, releasing an indirect block once all of its entries are
 * zero.
 */
static int free_hole_blocks(handle_t *handle, struct inode *inode,
			    struct buffer_head *parent_bh, __le32 *i_data,
			    int level, ext4_lblk_t first,
			    ext4_lblk_t count, int max)
{
	struct buffer_head *bh = NULL;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ret = 0;
	int i, inc;
	ext4_lblk_t offset;
	__le32 blk;

	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
		if (offset >= count + first)
			break;
		if (*i_data == 0 || (offset + inc) <= first)
			continue;
		blk = *i_data;
		if (level > 0) {
			ext4_lblk_t first2;
			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
						       "Read failure");
				return -EIO;
			}
			first2 = (first > offset) ? first - offset : 0;
			ret = free_hole_blocks(handle, inode, bh,
					       (__le32 *)bh->b_data, level - 1,
					       first2, count - offset,
					       inode->i_sb->s_blocksize >> 2);
			if (ret) {
				brelse(bh);
				goto err;
			}
		}
		if (level == 0 ||
		    (bh && all_zeroes((__le32 *)bh->b_data,
				      (__le32 *)bh->b_data + addr_per_block))) {
			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
			*i_data = 0;
		}
		brelse(bh);
		bh = NULL;
	}

err:
	return ret;
}

/*
 * Punch a hole in an indirect-mapped file: free all blocks in the logical
 * block range [first, stop), walking the direct, indirect, double-indirect
 * and triple-indirect regions in turn.
 */
int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
			  ext4_lblk_t first, ext4_lblk_t stop)
{
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int level, ret = 0;
	int num = EXT4_NDIR_BLOCKS;
	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
	__le32 *i_data = EXT4_I(inode)->i_data;

	count = stop - first;
	for (level = 0; level < 4; level++, max *= addr_per_block) {
		if (first < max) {
			ret = free_hole_blocks(handle, inode, NULL, i_data,
					       level, first, count, num);
			if (ret)
				goto err;
			if (count > max - first)
				count -= max - first;
			else
				break;
			first = 0;
		} else {
			first -= max;
		}
		i_data += num;
		if (level == 0) {
			num = 1;
			max = 1;
		}
	}

err:
	return ret;
}