/*
 * ext4 extents support: extent tree lookup, insertion, splitting,
 * merging, and removal.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>
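
/*
 * Flags passed to ext4_split_extent_at(): whether the split may fall
 * back to zeroing out, which of the two resulting extents to mark
 * unwritten, and which of them contain valid data.
 */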
#define EXT4_EXT_MAY_ZEROOUT	0x1

#define EXT4_EXT_MARK_UNWRIT1	0x2
#define EXT4_EXT_MARK_UNWRIT2	0x4

#define EXT4_EXT_DATA_VALID1	0x8
#define EXT4_EXT_DATA_VALID2	0x10
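
/*
 * Extent block checksum helpers: compute, verify, and set the checksum
 * stored in the ext4_extent_tail of each extent tree block when
 * metadata checksums are enabled.
 */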
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);
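
/*
 * Called from the truncate path when a transaction restart is needed:
 * discard preallocations and drop i_data_sem so the journal can be
 * restarted safely. The drop is reported to the caller via *dropped.
 */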
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}
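
/*
 * Make sure 'handle' has at least 'check_cred' credits, extending or
 * restarting the transaction otherwise (dropping and re-taking
 * i_data_sem around any restart).
 */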
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
			revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
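
/*
 * Get journal write access to an extent tree block; the root lives in
 * the inode body and is journalled through the inode instead.
 */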
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	return 0;
}
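
/*
 * Mark an extent tree node dirty: recompute the block checksum and
 * hand the buffer (or the inode, for the root) to the journal.
 */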
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
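
/*
 * Return a preferred physical block for a new allocation: bias toward
 * the extent nearest to 'block' in the current path, then the
 * containing tree block, and finally the inode's goal block.
 */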
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	return ext4_inode_to_goal_block(inode);
}
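
/*
 * Allocate a new metadata block for the extent tree, using
 * ext4_ext_find_goal() to pick the allocation goal.
 */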
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}
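
/*
 * ext4_ext_space_*(): how many extents or indexes fit in a full tree
 * block or in the in-inode root. With AGGRESSIVE_TEST defined the
 * capacities are artificially shrunk to exercise tree growth.
 */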
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}
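
/*
 * Validate all entries in an extent tree node: each extent or index
 * must reference valid blocks, and leaf entries must be sorted with
 * no overlaps.
 */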
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_fsblk_t *pblk, int depth)
{
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;

		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode,
			       ext_block_hdr(bh), depth, pblk);
	if (err)
		goto errout;
	set_buffer_verified(bh);

	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);

		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))
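
/*
 * Cache all of an inode's extents in the extent status tree by
 * walking the whole extent tree top-down.
 */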
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;

		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}
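
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index covering @block.
 * The header must be checked before calling this.
 */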
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
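
/*
 * ext4_ext_binsearch:
 * binary search for the closest extent covering @block.
 * The header must be checked before calling this.
 */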
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0)
		return;

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);

	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	ext4_ext_binsearch(inode, path + ppos, block);

	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
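
/*
 * ext4_ext_insert_index:
 * insert a new index [logical;ptr] into the index block at curp,
 * keeping the entries ordered.
 */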
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
			  "move %d indices from 0x%p to 0x%p\n",
			  logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
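
/*
 * ext4_ext_split:
 * split the tree so a new extent can be inserted: allocate new blocks,
 * move the tail of the full leaf (and of any full index blocks above
 * it) into them, and link the new subtree in with an index entry at
 * level 'at'.
 */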
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL;
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	}

	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}

	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;

		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);

	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}

		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}

		ext_size = sizeof(struct ext4_extent_header) +
			(sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
		       inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
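
/*
 * ext4_ext_grow_indepth:
 * grow the tree one level deeper by copying the current root into a
 * newly allocated block and turning the in-inode root into an index
 * pointing at it.
 */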
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	neh = ext_block_hdr(bh);

	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
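
/*
 * ext4_ext_create_new_leaf:
 * make room for newext by splitting at the lowest level that has a
 * free index slot, or by growing the tree depth when none does, then
 * refresh the path.
 */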
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	if (EXT_HAS_FREE_INDEX(curp)) {
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		path = ext4_find_extent(inode,
				(ext4_lblk_t)le32_to_cpu(newext->ee_block),
				ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		path = ext4_find_extent(inode,
				(ext4_lblk_t)le32_to_cpu(newext->ee_block),
				ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max)
			goto repeat;
	}

out:
	return err;
}
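
/*
 * Search for the closest allocated block to the left of *logical;
 * on success return its logical and physical position via *logical
 * and *phys.
 */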
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
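
/*
 * Search for the closest allocated block to the right of *logical;
 * returns 1 and fills *logical/*phys (and *ret_ex, if non-NULL) when
 * such an extent exists, 0 when it does not.
 */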
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		ex++;
		goto found_extent;
	}

	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	return 0;

got_index:
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = read_extent_tree_block(inode, block,
					    path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}
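
/*
 * ext4_ext_next_allocated_block:
 * return the logical block of the next allocated extent after the
 * current path position, or EXT_MAX_BLOCKS if there is none.
 * ext4_ext_next_leaf_block() below answers the same question at
 * leaf-block granularity.
 */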
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0)
		return EXT_MAX_BLOCKS;

	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
		    EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
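
/*
 * ext4_ext_correct_indexes:
 * if the first extent of a leaf changed, propagate the new starting
 * block up into the parent index entries.
 */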
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0)
		return 0;

	if (ex != EXT_FIRST_EXTENT(eh))
		return 0;

	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int ext4_can_extents_be_merged(struct inode *inode,
				      struct ext4_extent *ex1,
				      struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;

	if (ext4_ext_is_unwritten(ex1) &&
	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
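
/*
 * Merge helpers: ext4_ext_try_to_merge_right() coalesces an extent
 * with its logically and physically contiguous right neighbours;
 * ext4_ext_try_to_merge_up() collapses a one-index tree back into
 * the inode root; ext4_ext_try_to_merge() drives both.
 */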
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;

		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	if (ext4_journal_extend(handle, 2,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
		return;

	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
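
/*
 * ext4_ext_insert_extent:
 * try to merge newext into an existing extent; otherwise insert it
 * into the leaf, creating a new leaf or deepening the tree when the
 * current leaf is full.
 */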
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath,
			   struct ext4_extent *newext, int gb_flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex;
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	int mb_flags = 0, unwritten;

	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EFSCORRUPTED;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}

	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;
			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug(inode, "next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_find_extent(inode, next, NULL, gb_flags);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug(inode, "next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug(inode, "next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		mb_flags |= EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
				       ppath, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_unwritten(newext),
			  ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			ext_debug(inode, "insert %u:%llu:[%d]%d before: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
			nearex++;
		} else {
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug(inode, "insert %u:%llu:[%d]%d after: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug(inode, "insert %u:%llu:[%d]%d: "
				  "move %d extents from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	ext4_ext_drop_refs(npath);
	kfree(npath);
	return err;
}

static int ext4_fill_es_cache_info(struct inode *inode,
				   ext4_lblk_t block, ext4_lblk_t num,
				   struct fiemap_extent_info *fieinfo)
{
	ext4_lblk_t next, end = block + num - 1;
	struct extent_status es;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned int flags;
	int err;

	while (block <= end) {
		next = 0;
		flags = 0;
		if (!ext4_es_lookup_extent(inode, block, &next, &es))
			break;
		if (ext4_es_is_unwritten(&es))
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (ext4_es_is_delayed(&es))
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		if (ext4_es_is_hole(&es))
			flags |= EXT4_FIEMAP_EXTENT_HOLE;
		if (next == 0)
			flags |= FIEMAP_EXTENT_LAST;
		if (flags & (FIEMAP_EXTENT_DELALLOC|
			     EXT4_FIEMAP_EXTENT_HOLE))
			es.es_pblk = 0;
		else
			es.es_pblk = ext4_es_pblock(&es);
		err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
		if (next == 0)
			break;
		block = next;
		if (err < 0)
			return err;
		if (err == 1)
			return 0;
	}
	return 0;
}
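
/*
 * ext4_ext_determine_hole:
 * given a path to a missing mapping at *lblk, compute the start and
 * length of the hole surrounding it.
 */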
static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
					   struct ext4_ext_path *path,
					   ext4_lblk_t *lblk)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	ext4_lblk_t len;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		*lblk = 0;
		len = EXT_MAX_BLOCKS;
	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - *lblk;
	} else if (*lblk >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;

		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		BUG_ON(next == *lblk);
		len = next - *lblk;
	} else {
		BUG();
	}
	return len;
}
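
/*
 * Insert the hole around @hole_start into the extent status cache,
 * trimmed so it does not cover any delayed-allocation range.
 */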
static void
ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
			  ext4_lblk_t hole_len)
{
	struct extent_status es;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
				  hole_start + hole_len - 1, &es);
	if (es.es_len) {
		if (es.es_lblk <= hole_start)
			return;
		hole_len = min(es.es_lblk - hole_start, hole_len);
	}
	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
			      EXTENT_STATUS_HOLE);
}
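
/*
 * ext4_ext_rm_idx:
 * remove the index entry pointing at a now-empty leaf, free the leaf
 * block, and fix up parent indexes as needed.
 */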
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EFSCORRUPTED;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;

		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}
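
/*
 * Return the number of journal credits needed to modify a single
 * extent: cheap when the leaf still has room, otherwise the full
 * per-chunk estimate.
 */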
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
					    struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
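
/*
 * How many index/leaf blocks need to be journalled when modifying
 * @extents extents: two tree paths' worth of blocks for a single
 * extent, three otherwise.
 */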
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}

static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}
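
/*
 * Re-reserve a cluster whose delayed allocation reservation was
 * consumed: reclaim the quota, bump the reserved and free cluster
 * counters, and clear the pending-cluster record.
 */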
static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));

	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
	spin_unlock(&ei->i_block_reservation_lock);

	percpu_counter_add(&sbi->s_freeclusters_counter, 1);
	ext4_remove_pending(inode, lblk);
}
2407static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2408 struct ext4_extent *ex,
2409 struct partial_cluster *partial,
2410 ext4_lblk_t from, ext4_lblk_t to)
2411{
2412 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2413 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2414 ext4_fsblk_t last_pblk, pblk;
2415 ext4_lblk_t num;
2416 int flags;
2417
	/* only extent tail removal is allowed */
2419 if (from < le32_to_cpu(ex->ee_block) ||
2420 to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2421 ext4_error(sbi->s_sb,
2422 "strange request: removal(2) %u-%u from %u:%u",
2423 from, to, le32_to_cpu(ex->ee_block), ee_len);
2424 return 0;
2425 }
2426
2427#ifdef EXTENTS_STATS
2428 spin_lock(&sbi->s_ext_stats_lock);
2429 sbi->s_ext_blocks += ee_len;
2430 sbi->s_ext_extents++;
2431 if (ee_len < sbi->s_ext_min)
2432 sbi->s_ext_min = ee_len;
2433 if (ee_len > sbi->s_ext_max)
2434 sbi->s_ext_max = ee_len;
2435 if (ext_depth(inode) > sbi->s_depth_max)
2436 sbi->s_depth_max = ext_depth(inode);
2437 spin_unlock(&sbi->s_ext_stats_lock);
2438#endif
2439
2440 trace_ext4_remove_blocks(inode, ex, from, to, partial);
2441
	/*
	 * if we have a partial cluster, and it's different from the
	 * cluster of the last block in the extent, we free it
	 */
2446 last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2447
2448 if (partial->state != initial &&
2449 partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2450 if (partial->state == tofree) {
2451 flags = get_default_free_blocks_flags(inode);
2452 if (ext4_is_pending(inode, partial->lblk))
2453 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2454 ext4_free_blocks(handle, inode, NULL,
2455 EXT4_C2B(sbi, partial->pclu),
2456 sbi->s_cluster_ratio, flags);
2457 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2458 ext4_rereserve_cluster(inode, partial->lblk);
2459 }
2460 partial->state = initial;
2461 }
2462
2463 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2464 pblk = ext4_ext_pblock(ex) + ee_len - num;
2465
	/*
	 * We free the partial cluster at the end of the extent (if any),
	 * unless the cluster is used by another extent (partial_cluster
	 * state is nofree).  If a partial cluster exists here, it must be
	 * shared with the last block in the extent.
	 */
2472 flags = get_default_free_blocks_flags(inode);
2473
	/* partial, left end cluster aligned, right end unaligned */
2475 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2476 (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2477 (partial->state != nofree)) {
2478 if (ext4_is_pending(inode, to))
2479 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2480 ext4_free_blocks(handle, inode, NULL,
2481 EXT4_PBLK_CMASK(sbi, last_pblk),
2482 sbi->s_cluster_ratio, flags);
2483 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2484 ext4_rereserve_cluster(inode, to);
2485 partial->state = initial;
2486 flags = get_default_free_blocks_flags(inode);
2487 }
2488
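	/*
	 * Never free a partial cluster at the right end of the range here;
	 * it was either freed just above or is shared with what follows.
	 */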
2489 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
2497 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2498 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2499
	/* reset the partial cluster if we've freed past it */
2501 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2502 partial->state = initial;
2503
	/*
	 * If we've freed the entire extent but the beginning is not left
	 * cluster aligned and is not marked as ineligible for freeing we
	 * record the partial cluster at the beginning of the extent.  It
	 * wasn't freed by the preceding ext4_free_blocks() call, and we
	 * need to look farther to the left to determine if it's to be freed
	 * (not shared with another extent). Else, reset the partial
	 * cluster - we're either done freeing or the beginning of the
	 * extent is left cluster aligned.
	 */
2514 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2515 if (partial->state == initial) {
2516 partial->pclu = EXT4_B2C(sbi, pblk);
2517 partial->lblk = from;
2518 partial->state = tofree;
2519 }
2520 } else {
2521 partial->state = initial;
2522 }
2523
2524 return 0;
2525}
2526
/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end".  Both "start"
 * and "end" must appear in the same extent or EIO is returned.
 *
 * @handle:  The journal handle
 * @inode:   The file's inode
 * @path:    The path to the leaf
 * @partial: The cluster which we'll have to free if all extents
 *           have been released from it.  However, if its state is
 *           nofree, it's a cluster just to the right of the
 *           punched region and it must not be freed.
 * @start:   The first block to remove
 * @end:     The last block to remove
 */
2542static int
2543ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2544 struct ext4_ext_path *path,
2545 struct partial_cluster *partial,
2546 ext4_lblk_t start, ext4_lblk_t end)
2547{
2548 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2549 int err = 0, correct_index = 0;
2550 int depth = ext_depth(inode), credits, revoke_credits;
2551 struct ext4_extent_header *eh;
2552 ext4_lblk_t a, b;
2553 unsigned num;
2554 ext4_lblk_t ex_ee_block;
2555 unsigned short ex_ee_len;
2556 unsigned unwritten = 0;
2557 struct ext4_extent *ex;
2558 ext4_fsblk_t pblk;
2559
	/* the header must be checked already in ext4_ext_remove_space() */
2561 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2562 if (!path[depth].p_hdr)
2563 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2564 eh = path[depth].p_hdr;
2565 if (unlikely(path[depth].p_hdr == NULL)) {
2566 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2567 return -EFSCORRUPTED;
2568 }

	/* find where to start removing */
2570 ex = path[depth].p_ext;
2571 if (!ex)
2572 ex = EXT_LAST_EXTENT(eh);
2573
2574 ex_ee_block = le32_to_cpu(ex->ee_block);
2575 ex_ee_len = ext4_ext_get_actual_len(ex);
2576
2577 trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2578
2579 while (ex >= EXT_FIRST_EXTENT(eh) &&
2580 ex_ee_block + ex_ee_len > start) {
2581
2582 if (ext4_ext_is_unwritten(ex))
2583 unwritten = 1;
2584 else
2585 unwritten = 0;
2586
2587 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2588 unwritten, ex_ee_len);
2589 path[depth].p_ext = ex;
2590
2591 a = ex_ee_block > start ? ex_ee_block : start;
2592 b = ex_ee_block+ex_ee_len - 1 < end ?
2593 ex_ee_block+ex_ee_len - 1 : end;
2594
2595 ext_debug(inode, " border %u:%u\n", a, b);
2596
		/* If this extent is beyond the end of the hole, skip it */
2598 if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so note that its first cluster is in use to avoid
			 * freeing it when removing blocks.  Eventually, the
			 * right edge of the truncated/punched region will
			 * be just to the left.
			 */
2606 if (sbi->s_cluster_ratio > 1) {
2607 pblk = ext4_ext_pblock(ex);
2608 partial->pclu = EXT4_B2C(sbi, pblk);
2609 partial->state = nofree;
2610 }
2611 ex--;
2612 ex_ee_block = le32_to_cpu(ex->ee_block);
2613 ex_ee_len = ext4_ext_get_actual_len(ex);
2614 continue;
2615 } else if (b != ex_ee_block + ex_ee_len - 1) {
2616 EXT4_ERROR_INODE(inode,
2617 "can not handle truncate %u:%u "
2618 "on extent %u:%u",
2619 start, end, ex_ee_block,
2620 ex_ee_block + ex_ee_len - 1);
2621 err = -EFSCORRUPTED;
2622 goto out;
2623 } else if (a != ex_ee_block) {
			/* remove tail of the extent */
2625 num = a - ex_ee_block;
2626 } else {
			/* remove whole extent: excellent! */
2628 num = 0;
2629 }
2630
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group
		 */
2636 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2637 if (ex == EXT_FIRST_EXTENT(eh)) {
2638 correct_index = 1;
2639 credits += (ext_depth(inode)) + 1;
2640 }
2641 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		/*
		 * We may end up freeing some index blocks and data from the
		 * punched range. Note that partial clusters are accounted for
		 * by ext4_free_data_revoke_credits().
		 */
2647 revoke_credits =
2648 ext4_free_metadata_revoke_credits(inode->i_sb,
2649 ext_depth(inode)) +
2650 ext4_free_data_revoke_credits(inode, b - a + 1);
2651
2652 err = ext4_datasem_ensure_credits(handle, inode, credits,
2653 credits, revoke_credits);
2654 if (err) {
2655 if (err > 0)
2656 err = -EAGAIN;
2657 goto out;
2658 }
2659
2660 err = ext4_ext_get_access(handle, inode, path + depth);
2661 if (err)
2662 goto out;
2663
2664 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2665 if (err)
2666 goto out;
2667
2668 if (num == 0)
			/* this extent is removed; mark slot entirely unused */
2670 ext4_ext_store_pblock(ex, 0);
2671
2672 ex->ee_len = cpu_to_le16(num);

		/*
		 * Do not mark unwritten if all the blocks in the
		 * extent have been removed.
		 */
2677 if (unwritten && num)
2678 ext4_ext_mark_unwritten(ex);

		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
2683 if (num == 0) {
2684 if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
2690 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2691 sizeof(struct ext4_extent));
2692
				/* now get rid of the extent at the end */
2694 memset(EXT_LAST_EXTENT(eh), 0,
2695 sizeof(struct ext4_extent));
2696 }
2697 le16_add_cpu(&eh->eh_entries, -1);
2698 }
2699
2700 err = ext4_ext_dirty(handle, inode, path + depth);
2701 if (err)
2702 goto out;
2703
2704 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2705 ext4_ext_pblock(ex));
2706 ex--;
2707 ex_ee_block = le32_to_cpu(ex->ee_block);
2708 ex_ee_len = ext4_ext_get_actual_len(ex);
2709 }
2710
2711 if (correct_index && eh->eh_entries)
2712 err = ext4_ext_correct_indexes(handle, inode, path);
2713
	/*
	 * If there's a partial cluster and at least one extent remains in
	 * the leaf, free the partial cluster if it isn't shared with the
	 * current extent.  If it is shared with the current extent
	 * we reset the partial cluster because we've reached the start of the
	 * truncated/punched region and we're done removing blocks.
	 */
2721 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2722 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2723 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2724 int flags = get_default_free_blocks_flags(inode);
2725
2726 if (ext4_is_pending(inode, partial->lblk))
2727 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2728 ext4_free_blocks(handle, inode, NULL,
2729 EXT4_C2B(sbi, partial->pclu),
2730 sbi->s_cluster_ratio, flags);
2731 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2732 ext4_rereserve_cluster(inode, partial->lblk);
2733 }
2734 partial->state = initial;
2735 }

	/*
	 * if this leaf is free, then we should
	 * remove it from the index block above
	 */
2739 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2740 err = ext4_ext_rm_idx(handle, inode, path, depth);
2741
2742out:
2743 return err;
2744}
2745
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
2750static int
2751ext4_ext_more_to_rm(struct ext4_ext_path *path)
2752{
2753 BUG_ON(path->p_idx == NULL);
2754
2755 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2756 return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
2762 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2763 return 0;
2764 return 1;
2765}
2766
2767int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2768 ext4_lblk_t end)
2769{
2770 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2771 int depth = ext_depth(inode);
2772 struct ext4_ext_path *path = NULL;
2773 struct partial_cluster partial;
2774 handle_t *handle;
2775 int i = 0, err = 0;
2776
2777 partial.pclu = 0;
2778 partial.lblk = 0;
2779 partial.state = initial;
2780
2781 ext_debug(inode, "truncate since %u to %u\n", start, end);
2782
	/* probably first extent we're gonna free will be last in block */
2784 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2785 depth + 1,
2786 ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2787 if (IS_ERR(handle))
2788 return PTR_ERR(handle);
2789
2790again:
2791 trace_ext4_ext_remove_space(inode, start, end, depth);

	/*
	 * Check if we are removing extents inside the extent tree. If that
	 * is the case, we are going to punch a hole inside the extent tree
	 * so we have to check whether we need to split the extent covering
	 * the last block to remove so we can easily remove the part of it
	 * in ext4_ext_rm_leaf().
	 */
2800 if (end < EXT_MAX_BLOCKS - 1) {
2801 struct ext4_extent *ex;
2802 ext4_lblk_t ee_block, ex_end, lblk;
2803 ext4_fsblk_t pblk;
2804
		/* find extent for or closest extent to this block */
2806 path = ext4_find_extent(inode, end, NULL,
2807 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2808 if (IS_ERR(path)) {
2809 ext4_journal_stop(handle);
2810 return PTR_ERR(path);
2811 }
2812 depth = ext_depth(inode);
		/* the leaf can be missing only if the inode has no blocks */
2814 ex = path[depth].p_ext;
2815 if (!ex) {
2816 if (depth) {
2817 EXT4_ERROR_INODE(inode,
2818 "path[%d].p_hdr == NULL",
2819 depth);
2820 err = -EFSCORRUPTED;
2821 }
2822 goto out;
2823 }
2824
2825 ee_block = le32_to_cpu(ex->ee_block);
2826 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2827
		/*
		 * See if the last block is inside the extent, if so split
		 * the extent at 'end' block so we can easily remove the
		 * tail of the first part of the split extent in
		 * ext4_ext_rm_leaf().
		 */
2834 if (end >= ee_block && end < ex_end) {
2835
			/*
			 * If we're going to split the extent, note that
			 * the cluster containing the block after 'end' is
			 * in use to avoid freeing it when removing blocks.
			 */
2841 if (sbi->s_cluster_ratio > 1) {
2842 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2843 partial.pclu = EXT4_B2C(sbi, pblk);
2844 partial.state = nofree;
2845 }

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use
			 * reserved block if that happens.
			 */
2853 err = ext4_force_split_extent_at(handle, inode, &path,
2854 end + 1, 1);
2855 if (err < 0)
2856 goto out;
2857
2858 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2859 partial.state == initial) {
			/*
			 * If we're punching, there's an extent to the right.
			 * If the partial cluster hasn't been set, set it to
			 * that extent's first cluster and its state to nofree
			 * so it won't be freed should it contain blocks to be
			 * removed. If it's already set (tofree/nofree), we're
			 * retrying and keep the original partial cluster info
			 * so a cluster marked tofree as a result of earlier
			 * extent removal is not lost.
			 */
2870 lblk = ex_end + 1;
2871 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2872 NULL);
2873 if (err < 0)
2874 goto out;
2875 if (pblk) {
2876 partial.pclu = EXT4_B2C(sbi, pblk);
2877 partial.state = nofree;
2878 }
2879 }
2880 }
2881
	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
2885 depth = ext_depth(inode);
2886 if (path) {
2887 int k = i = depth;
2888 while (--k > 0)
2889 path[k].p_block =
2890 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2891 } else {
2892 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2893 GFP_NOFS | __GFP_NOFAIL);
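		/* __GFP_NOFAIL means this should not fail; check anyway */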
2894 if (path == NULL) {
2895 ext4_journal_stop(handle);
2896 return -ENOMEM;
2897 }
2898 path[0].p_maxdepth = path[0].p_depth = depth;
2899 path[0].p_hdr = ext_inode_hdr(inode);
2900 i = 0;
2901
2902 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2903 err = -EFSCORRUPTED;
2904 goto out;
2905 }
2906 }
2907 err = 0;
2908
2909 while (i >= 0 && err == 0) {
2910 if (i == depth) {
			/* this is leaf block */
2912 err = ext4_ext_rm_leaf(handle, inode, path,
2913 &partial, start, end);
			/* root level has p_bh == NULL, brelse() eats this */
2915 brelse(path[i].p_bh);
2916 path[i].p_bh = NULL;
2917 i--;
2918 continue;
2919 }
2920
		/* this is index block */
2922 if (!path[i].p_hdr) {
2923 ext_debug(inode, "initialize header\n");
2924 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2925 }
2926
2927 if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
2929 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2930 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2931 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2932 path[i].p_hdr,
2933 le16_to_cpu(path[i].p_hdr->eh_entries));
2934 } else {
			/* we were already here, see at next index */
2936 path[i].p_idx--;
2937 }
2938
2939 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2940 i, EXT_FIRST_INDEX(path[i].p_hdr),
2941 path[i].p_idx);
2942 if (ext4_ext_more_to_rm(path + i)) {
2943 struct buffer_head *bh;
2944
2945 ext_debug(inode, "move to level %d (block %llu)\n",
2946 i + 1, ext4_idx_pblock(path[i].p_idx));
2947 memset(path + i + 1, 0, sizeof(*path));
2948 bh = read_extent_tree_block(inode,
2949 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2950 EXT4_EX_NOCACHE);
2951 if (IS_ERR(bh)) {
				/* should we reset i_size? */
2953 err = PTR_ERR(bh);
2954 break;
2955 }

			/* Yield here to deal with large extent trees.
			 * Should be a no-op if we did IO above. */
2958 cond_resched();
2959 if (WARN_ON(i + 1 > depth)) {
2960 err = -EFSCORRUPTED;
2961 break;
2962 }
2963 path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
2967 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2968 i++;
2969 } else {
			/* we finished processing this index, go up */
2971 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/*
				 * index is empty, remove it; the journal
				 * handle was already prepared when the
				 * leaves below were truncated
				 */
2975 err = ext4_ext_rm_idx(handle, inode, path, i);
2976 }
			/* root level has p_bh == NULL, brelse() eats this */
2978 brelse(path[i].p_bh);
2979 path[i].p_bh = NULL;
2980 i--;
2981 ext_debug(inode, "return to level %d\n", i);
2982 }
2983 }
2984
2985 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
2986 path->p_hdr->eh_entries);

	/*
	 * if there's a partial cluster and we have removed the first extent
	 * in the file, then we also free the partial cluster, if any
	 */
2992 if (partial.state == tofree && err == 0) {
2993 int flags = get_default_free_blocks_flags(inode);
2994
2995 if (ext4_is_pending(inode, partial.lblk))
2996 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2997 ext4_free_blocks(handle, inode, NULL,
2998 EXT4_C2B(sbi, partial.pclu),
2999 sbi->s_cluster_ratio, flags);
3000 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3001 ext4_rereserve_cluster(inode, partial.lblk);
3002 partial.state = initial;
3003 }
3004
	/* TODO: flexible tree reduction should be here */
3006 if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
3011 err = ext4_ext_get_access(handle, inode, path);
3012 if (err == 0) {
3013 ext_inode_hdr(inode)->eh_depth = 0;
3014 ext_inode_hdr(inode)->eh_max =
3015 cpu_to_le16(ext4_ext_space_root(inode, 0));
3016 err = ext4_ext_dirty(handle, inode, path);
3017 }
3018 }
3019out:
3020 ext4_ext_drop_refs(path);
3021 kfree(path);
3022 path = NULL;
3023 if (err == -EAGAIN)
3024 goto again;
3025 ext4_journal_stop(handle);
3026
3027 return err;
3028}
3029
/*
 * called at mount time
 */
3033void ext4_ext_init(struct super_block *sb)
3034{
	/*
	 * possible initialization would be here
	 */

3039 if (ext4_has_feature_extents(sb)) {
3040#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3041 printk(KERN_INFO "EXT4-fs: file extents enabled"
3042#ifdef AGGRESSIVE_TEST
3043 ", aggressive tests"
3044#endif
3045#ifdef CHECK_BINSEARCH
3046 ", check binsearch"
3047#endif
3048#ifdef EXTENTS_STATS
3049 ", stats"
3050#endif
3051 "\n");
3052#endif
3053#ifdef EXTENTS_STATS
3054 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3055 EXT4_SB(sb)->s_ext_min = 1 << 30;
3056 EXT4_SB(sb)->s_ext_max = 0;
3057#endif
3058 }
3059}
3060
/*
 * called at umount time
 */
3064void ext4_ext_release(struct super_block *sb)
3065{
3066 if (!ext4_has_feature_extents(sb))
3067 return;
3068
3069#ifdef EXTENTS_STATS
3070 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3071 struct ext4_sb_info *sbi = EXT4_SB(sb);
3072 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3073 sbi->s_ext_blocks, sbi->s_ext_extents,
3074 sbi->s_ext_blocks / sbi->s_ext_extents);
3075 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3076 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3077 }
3078#endif
3079}
3080
3081static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3082{
3083 ext4_lblk_t ee_block;
3084 ext4_fsblk_t ee_pblock;
3085 unsigned int ee_len;
3086
3087 ee_block = le32_to_cpu(ex->ee_block);
3088 ee_len = ext4_ext_get_actual_len(ex);
3089 ee_pblock = ext4_ext_pblock(ex);
3090
3091 if (ee_len == 0)
3092 return 0;
3093
3094 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3095 EXTENT_STATUS_WRITTEN);
3096}
3097
/* FIXME!! we need to try to merge to left or right after zero-out */
3099static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3100{
3101 ext4_fsblk_t ee_pblock;
3102 unsigned int ee_len;
3103
3104 ee_len = ext4_ext_get_actual_len(ex);
3105 ee_pblock = ext4_ext_pblock(ex);
3106 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3107 ee_len);
3108}
3109

/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @ppath: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates if the extent could be zeroed out if the split
 *		fails, and the states (initialized or unwritten) of the
 *		new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
 * of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> split is not needed, and just mark the extent.
 *
 * return 0 on success.
 */
3131static int ext4_split_extent_at(handle_t *handle,
3132 struct inode *inode,
3133 struct ext4_ext_path **ppath,
3134 ext4_lblk_t split,
3135 int split_flag,
3136 int flags)
3137{
3138 struct ext4_ext_path *path = *ppath;
3139 ext4_fsblk_t newblock;
3140 ext4_lblk_t ee_block;
3141 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3142 struct ext4_extent *ex2 = NULL;
3143 unsigned int ee_len, depth;
3144 int err = 0;
3145
3146 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3147 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3148
3149 ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3150
3151 ext4_ext_show_leaf(inode, path);
3152
3153 depth = ext_depth(inode);
3154 ex = path[depth].p_ext;
3155 ee_block = le32_to_cpu(ex->ee_block);
3156 ee_len = ext4_ext_get_actual_len(ex);
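	/* physical block that corresponds to the split point */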
3157 newblock = split - ee_block + ext4_ext_pblock(ex);
3158
3159 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3160 BUG_ON(!ext4_ext_is_unwritten(ex) &&
3161 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3162 EXT4_EXT_MARK_UNWRIT1 |
3163 EXT4_EXT_MARK_UNWRIT2));
3164
3165 err = ext4_ext_get_access(handle, inode, path + depth);
3166 if (err)
3167 goto out;
3168
3169 if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with;
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
3175 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3176 ext4_ext_mark_unwritten(ex);
3177 else
3178 ext4_ext_mark_initialized(ex);
3179
3180 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3181 ext4_ext_try_to_merge(handle, inode, path, ex);
3182
3183 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3184 goto out;
3185 }
3186
	/* case a */
3188 memcpy(&orig_ex, ex, sizeof(orig_ex));
3189 ex->ee_len = cpu_to_le16(split - ee_block);
3190 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3191 ext4_ext_mark_unwritten(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
3197 err = ext4_ext_dirty(handle, inode, path + depth);
3198 if (err)
3199 goto fix_extent_len;
3200
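	/* case a: split the extent in two; build the second half at @split */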
3201 ex2 = &newex;
3202 ex2->ee_block = cpu_to_le32(split);
3203 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3204 ext4_ext_store_pblock(ex2, newblock);
3205 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3206 ext4_ext_mark_unwritten(ex2);
3207
3208 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3209 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3210 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3211 if (split_flag & EXT4_EXT_DATA_VALID1) {
3212 err = ext4_ext_zeroout(inode, ex2);
3213 zero_ex.ee_block = ex2->ee_block;
3214 zero_ex.ee_len = cpu_to_le16(
3215 ext4_ext_get_actual_len(ex2));
3216 ext4_ext_store_pblock(&zero_ex,
3217 ext4_ext_pblock(ex2));
3218 } else {
3219 err = ext4_ext_zeroout(inode, ex);
3220 zero_ex.ee_block = ex->ee_block;
3221 zero_ex.ee_len = cpu_to_le16(
3222 ext4_ext_get_actual_len(ex));
3223 ext4_ext_store_pblock(&zero_ex,
3224 ext4_ext_pblock(ex));
3225 }
3226 } else {
3227 err = ext4_ext_zeroout(inode, &orig_ex);
3228 zero_ex.ee_block = orig_ex.ee_block;
3229 zero_ex.ee_len = cpu_to_le16(
3230 ext4_ext_get_actual_len(&orig_ex));
3231 ext4_ext_store_pblock(&zero_ex,
3232 ext4_ext_pblock(&orig_ex));
3233 }
3234
3235 if (err)
3236 goto fix_extent_len;
3237
3238 ex->ee_len = cpu_to_le16(ee_len);
3239 ext4_ext_try_to_merge(handle, inode, path, ex);
3240 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3241 if (err)
3242 goto fix_extent_len;
3243
		/* update extent status tree */
3245 err = ext4_zeroout_es(inode, &zero_ex);
3246
3247 goto out;
3248 } else if (err)
3249 goto fix_extent_len;
3250
3251out:
3252 ext4_ext_show_leaf(inode, path);
3253 return err;
3254
3255fix_extent_len:
3256 ex->ee_len = orig_ex.ee_len;
	/*
	 * Ignore ext4_ext_dirty return value since we are already in error path
	 * and err is a non-zero error code.
	 */
3261 ext4_ext_dirty(handle, inode, path + path->p_depth);
3262 return err;
3263}
3264
/*
 * ext4_split_extent() splits an extent and marks the extent which is covered
 * by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Somebody is splitting in the middle of the
 *      extent
 */
3276static int ext4_split_extent(handle_t *handle,
3277 struct inode *inode,
3278 struct ext4_ext_path **ppath,
3279 struct ext4_map_blocks *map,
3280 int split_flag,
3281 int flags)
3282{
3283 struct ext4_ext_path *path = *ppath;
3284 ext4_lblk_t ee_block;
3285 struct ext4_extent *ex;
3286 unsigned int ee_len, depth;
3287 int err = 0;
3288 int unwritten;
3289 int split_flag1, flags1;
3290 int allocated = map->m_len;
3291
3292 depth = ext_depth(inode);
3293 ex = path[depth].p_ext;
3294 ee_block = le32_to_cpu(ex->ee_block);
3295 ee_len = ext4_ext_get_actual_len(ex);
3296 unwritten = ext4_ext_is_unwritten(ex);
3297
3298 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3299 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3300 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3301 if (unwritten)
3302 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3303 EXT4_EXT_MARK_UNWRIT2;
3304 if (split_flag & EXT4_EXT_DATA_VALID2)
3305 split_flag1 |= EXT4_EXT_DATA_VALID1;
3306 err = ext4_split_extent_at(handle, inode, ppath,
3307 map->m_lblk + map->m_len, split_flag1, flags1);
3308 if (err)
3309 goto out;
3310 } else {
3311 allocated = ee_len - (map->m_lblk - ee_block);
3312 }

	/*
	 * Update path is required because previous ext4_split_extent_at() may
	 * result in split of original leaf or extent zeroout.
	 */
3317 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3318 if (IS_ERR(path))
3319 return PTR_ERR(path);
3320 depth = ext_depth(inode);
3321 ex = path[depth].p_ext;
3322 if (!ex) {
3323 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3324 (unsigned long) map->m_lblk);
3325 return -EFSCORRUPTED;
3326 }
3327 unwritten = ext4_ext_is_unwritten(ex);
3328 split_flag1 = 0;
3329
3330 if (map->m_lblk >= ee_block) {
3331 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3332 if (unwritten) {
3333 split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3334 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3335 EXT4_EXT_MARK_UNWRIT2);
3336 }
3337 err = ext4_split_extent_at(handle, inode, ppath,
3338 map->m_lblk, split_flag1, flags);
3339 if (err)
3340 goto out;
3341 }
3342
3343 ext4_ext_show_leaf(inode, path);
3344out:
3345 return err ? err : allocated;
3346}
3347
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Somebody is writing in middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains the region
 *    [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
3368static int ext4_ext_convert_to_initialized(handle_t *handle,
3369 struct inode *inode,
3370 struct ext4_map_blocks *map,
3371 struct ext4_ext_path **ppath,
3372 int flags)
3373{
3374 struct ext4_ext_path *path = *ppath;
3375 struct ext4_sb_info *sbi;
3376 struct ext4_extent_header *eh;
3377 struct ext4_map_blocks split_map;
3378 struct ext4_extent zero_ex1, zero_ex2;
3379 struct ext4_extent *ex, *abut_ex;
3380 ext4_lblk_t ee_block, eof_block;
3381 unsigned int ee_len, depth, map_len = map->m_len;
3382 int allocated = 0, max_zeroout = 0;
3383 int err = 0;
3384 int split_flag = EXT4_EXT_DATA_VALID2;
3385
3386 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3387 (unsigned long long)map->m_lblk, map_len);
3388
3389 sbi = EXT4_SB(inode->i_sb);
3390 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3391 >> inode->i_sb->s_blocksize_bits;
3392 if (eof_block < map->m_lblk + map_len)
3393 eof_block = map->m_lblk + map_len;
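	/* extents lying entirely at or below eof_block can be zeroed out */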
3394
3395 depth = ext_depth(inode);
3396 eh = path[depth].p_hdr;
3397 ex = path[depth].p_ext;
3398 ee_block = le32_to_cpu(ex->ee_block);
3399 ee_len = ext4_ext_get_actual_len(ex);
3400 zero_ex1.ee_len = 0;
3401 zero_ex2.ee_len = 0;
3402
3403 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3404
	/* Pre-conditions */
3406 BUG_ON(!ext4_ext_is_unwritten(ex));
3407 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3408
	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * unwritten extent to its neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. Transferring to the left is the common case in
	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
	 * followed by append writes.
	 *
	 * Limitations of the current logic:
	 *  - L1: we do not deal with writes covering the whole extent.
	 *    This would require removing the extent if the transfer
	 *    is possible.
	 *  - L2: we only attempt to merge with an extent stored in the
	 *    same extent tree node.
	 */
3424 if ((map->m_lblk == ee_block) &&
		/* See if we can merge left */
3426 (map_len < ee_len) &&
3427 (ex > EXT_FIRST_EXTENT(eh))) {
3428 ext4_lblk_t prev_lblk;
3429 ext4_fsblk_t prev_pblk, ee_pblk;
3430 unsigned int prev_len;
3431
3432 abut_ex = ex - 1;
3433 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3434 prev_len = ext4_ext_get_actual_len(abut_ex);
3435 prev_pblk = ext4_ext_pblock(abut_ex);
3436 ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon the following conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
3447 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3448 ((prev_lblk + prev_len) == ee_block) &&
3449 ((prev_pblk + prev_len) == ee_pblk) &&
3450 (prev_len < (EXT_INIT_MAX_LEN - map_len))) {
3451 err = ext4_ext_get_access(handle, inode, path + depth);
3452 if (err)
3453 goto out;
3454
3455 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3456 map, ex, abut_ex);
3457
			/* Shift the start of ex by 'map_len' blocks */
3459 ex->ee_block = cpu_to_le32(ee_block + map_len);
3460 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3461 ex->ee_len = cpu_to_le16(ee_len - map_len);
3462 ext4_ext_mark_unwritten(ex);
3463
			/* Extend abut_ex by 'map_len' blocks */
3465 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3466
			/* Result: number of initialized blocks past m_lblk */
3468 allocated = map_len;
3469 }
3470 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3471 (map_len < ee_len) &&
3472 ex < EXT_LAST_EXTENT(eh)) {
		/* See if we can merge right */
3474 ext4_lblk_t next_lblk;
3475 ext4_fsblk_t next_pblk, ee_pblk;
3476 unsigned int next_len;
3477
3478 abut_ex = ex + 1;
3479 next_lblk = le32_to_cpu(abut_ex->ee_block);
3480 next_len = ext4_ext_get_actual_len(abut_ex);
3481 next_pblk = ext4_ext_pblock(abut_ex);
3482 ee_pblk = ext4_ext_pblock(ex);
3483
		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon the following conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
3493 if ((!ext4_ext_is_unwritten(abut_ex)) &&
3494 ((map->m_lblk + map_len) == next_lblk) &&
3495 ((ee_pblk + ee_len) == next_pblk) &&
3496 (next_len < (EXT_INIT_MAX_LEN - map_len))) {
3497 err = ext4_ext_get_access(handle, inode, path + depth);
3498 if (err)
3499 goto out;
3500
3501 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3502 map, ex, abut_ex);
3503
			/* Shift the start of abut_ex by 'map_len' blocks */
3505 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3506 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3507 ex->ee_len = cpu_to_le16(ee_len - map_len);
3508 ext4_ext_mark_unwritten(ex);
3509
			/* Extend abut_ex by 'map_len' blocks */
3511 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3512
			/* Result: number of initialized blocks past m_lblk */
3514 allocated = map_len;
3515 }
3516 }
3517 if (allocated) {
		/* Mark the block containing both extents as dirty */
3519 err = ext4_ext_dirty(handle, inode, path + depth);
3520
		/* Update path to point to the right extent */
3522 path[depth].p_ext = abut_ex;
3523 goto out;
3524 } else
3525 allocated = ee_len - (map->m_lblk - ee_block);
3526
3527 WARN_ON(map->m_lblk < ee_block);

	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
3532 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3533
3534 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3535 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3536 (inode->i_sb->s_blocksize_bits - 10);

	/*
	 * five cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the head of the first
	 *    extent.
	 * 3. split the extent into two extents, zeroout the tail of the second
	 *    extent.
	 * 4. split the extent into two extents without zeroout.
	 * 5. no splitting needed, just possibly zeroout the head and / or the
	 *    tail of the extent.
	 */
3549 split_map.m_lblk = map->m_lblk;
3550 split_map.m_len = map->m_len;
3551
3552 if (max_zeroout && (allocated > split_map.m_len)) {
3553 if (allocated <= max_zeroout) {
			/* case 3 or 5 */
3555 zero_ex1.ee_block =
3556 cpu_to_le32(split_map.m_lblk +
3557 split_map.m_len);
3558 zero_ex1.ee_len =
3559 cpu_to_le16(allocated - split_map.m_len);
3560 ext4_ext_store_pblock(&zero_ex1,
3561 ext4_ext_pblock(ex) + split_map.m_lblk +
3562 split_map.m_len - ee_block);
3563 err = ext4_ext_zeroout(inode, &zero_ex1);
3564 if (err)
3565 goto out;
3566 split_map.m_len = allocated;
3567 }
3568 if (split_map.m_lblk - ee_block + split_map.m_len <
3569 max_zeroout) {
			/* case 2 or 5 */
3571 if (split_map.m_lblk != ee_block) {
3572 zero_ex2.ee_block = ex->ee_block;
3573 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3574 ee_block);
3575 ext4_ext_store_pblock(&zero_ex2,
3576 ext4_ext_pblock(ex));
3577 err = ext4_ext_zeroout(inode, &zero_ex2);
3578 if (err)
3579 goto out;
3580 }
3581
3582 split_map.m_len += split_map.m_lblk - ee_block;
3583 split_map.m_lblk = ee_block;
3584 allocated = map->m_len;
3585 }
3586 }
3587
3588 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3589 flags);
3590 if (err > 0)
3591 err = 0;
3592out:
	/* If we have gotten a failure, don't zero out status tree */
3594 if (!err) {
3595 err = ext4_zeroout_es(inode, &zero_ex1);
3596 if (!err)
3597 err = ext4_zeroout_es(inode, &zero_ex2);
3598 }
3599 return err ? err : allocated;
3600}
3601
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO writes
 * to an unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Somebody is writing in middle of the extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent is split. To prevent ENOSPC at I/O completion, we
 * split the unwritten extent before the DIO is submitted. The unwritten
 * extent will be split into at most three unwritten extents. After the
 * I/O completes, the filled part is converted to initialized by the
 * end_io callback, via ext4_convert_unwritten_extents().
 *
 * Returns the size of the unwritten extent to be written on success.
 */
3626static int ext4_split_convert_extents(handle_t *handle,
3627 struct inode *inode,
3628 struct ext4_map_blocks *map,
3629 struct ext4_ext_path **ppath,
3630 int flags)
3631{
3632 struct ext4_ext_path *path = *ppath;
3633 ext4_lblk_t eof_block;
3634 ext4_lblk_t ee_block;
3635 struct ext4_extent *ex;
3636 unsigned int ee_len;
3637 int split_flag = 0, depth;
3638
3639 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3640 (unsigned long long)map->m_lblk, map->m_len);
3641
3642 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3643 >> inode->i_sb->s_blocksize_bits;
3644 if (eof_block < map->m_lblk + map->m_len)
3645 eof_block = map->m_lblk + map->m_len;
3646
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
3650 depth = ext_depth(inode);
3651 ex = path[depth].p_ext;
3652 ee_block = le32_to_cpu(ex->ee_block);
3653 ee_len = ext4_ext_get_actual_len(ex);
3654
	/* Convert to unwritten */
3656 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3657 split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
3659 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3660 split_flag |= ee_block + ee_len <= eof_block ?
3661 EXT4_EXT_MAY_ZEROOUT : 0;
3662 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3663 }
3664 flags |= EXT4_GET_BLOCKS_PRE_IO;
3665 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3666}
3667
3668static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3669 struct inode *inode,
3670 struct ext4_map_blocks *map,
3671 struct ext4_ext_path **ppath)
3672{
3673 struct ext4_ext_path *path = *ppath;
3674 struct ext4_extent *ex;
3675 ext4_lblk_t ee_block;
3676 unsigned int ee_len;
3677 int depth;
3678 int err = 0;
3679
3680 depth = ext_depth(inode);
3681 ex = path[depth].p_ext;
3682 ee_block = le32_to_cpu(ex->ee_block);
3683 ee_len = ext4_ext_get_actual_len(ex);
3684
3685 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3686 (unsigned long long)ee_block, ee_len);
3687

	/* If the extent is larger than requested it is a clear sign that we
	 * still have some extent state machine issues left. So extent_split
	 * is still required.
	 * TODO: Once all related issues are fixed this situation should be
	 * removed.
	 */
3694 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3695#ifdef CONFIG_EXT4_DEBUG
3696 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3697 " len %u; IO logical block %llu, len %u",
3698 inode->i_ino, (unsigned long long)ee_block, ee_len,
3699 (unsigned long long)map->m_lblk, map->m_len);
3700#endif
3701 err = ext4_split_convert_extents(handle, inode, map, ppath,
3702 EXT4_GET_BLOCKS_CONVERT);
3703 if (err < 0)
3704 return err;
3705 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3706 if (IS_ERR(path))
3707 return PTR_ERR(path);
3708 depth = ext_depth(inode);
3709 ex = path[depth].p_ext;
3710 }
3711
3712 err = ext4_ext_get_access(handle, inode, path + depth);
3713 if (err)
3714 goto out;

	/* first mark the extent as initialized */
3716 ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
3721 ext4_ext_try_to_merge(handle, inode, path, ex);
3722
	/* Mark modified extent as dirty */
3724 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3725out:
3726 ext4_ext_show_leaf(inode, path);
3727 return err;
3728}
3729
3730static int
3731convert_initialized_extent(handle_t *handle, struct inode *inode,
3732 struct ext4_map_blocks *map,
3733 struct ext4_ext_path **ppath,
3734 unsigned int *allocated)
3735{
3736 struct ext4_ext_path *path = *ppath;
3737 struct ext4_extent *ex;
3738 ext4_lblk_t ee_block;
3739 unsigned int ee_len;
3740 int depth;
3741 int err = 0;
3742
	/*
	 * Make sure that the extent is no bigger than we support with
	 * unwritten extents
	 */
3747 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3748 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3749
3750 depth = ext_depth(inode);
3751 ex = path[depth].p_ext;
3752 ee_block = le32_to_cpu(ex->ee_block);
3753 ee_len = ext4_ext_get_actual_len(ex);
3754
3755 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3756 (unsigned long long)ee_block, ee_len);
3757
3758 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3759 err = ext4_split_convert_extents(handle, inode, map, ppath,
3760 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3761 if (err < 0)
3762 return err;
3763 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3764 if (IS_ERR(path))
3765 return PTR_ERR(path);
3766 depth = ext_depth(inode);
3767 ex = path[depth].p_ext;
3768 if (!ex) {
3769 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3770 (unsigned long) map->m_lblk);
3771 return -EFSCORRUPTED;
3772 }
3773 }
3774
3775 err = ext4_ext_get_access(handle, inode, path + depth);
3776 if (err)
3777 return err;

	/* first mark the extent as unwritten */
3779 ext4_ext_mark_unwritten(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
3784 ext4_ext_try_to_merge(handle, inode, path, ex);
3785
	/* Mark modified extent as dirty */
3787 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3788 if (err)
3789 return err;
3790 ext4_ext_show_leaf(inode, path);
3791
3792 ext4_update_inode_fsync_trans(handle, inode, 1);
3793
3794 map->m_flags |= EXT4_MAP_UNWRITTEN;
3795 if (*allocated > map->m_len)
3796 *allocated = map->m_len;
3797 map->m_len = *allocated;
3798 return 0;
3799}
3800
3801static int
3802ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3803 struct ext4_map_blocks *map,
3804 struct ext4_ext_path **ppath, int flags,
3805 unsigned int allocated, ext4_fsblk_t newblock)
3806{
3807 struct ext4_ext_path __maybe_unused *path = *ppath;
3808 int ret = 0;
3809 int err = 0;
3810
3811 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3812 (unsigned long long)map->m_lblk, map->m_len, flags,
3813 allocated);
3814 ext4_ext_show_leaf(inode, path);
3815
	/*
	 * When writing into unwritten space, we should not fail to
	 * allocate metadata blocks for the new extent block if needed.
	 */
3820 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3821
3822 trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3823 allocated, newblock);
3824
	/* get_block() before submitting IO, split the extent */
3826 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3827 ret = ext4_split_convert_extents(handle, inode, map, ppath,
3828 flags | EXT4_GET_BLOCKS_CONVERT);
3829 if (ret < 0) {
3830 err = ret;
3831 goto out2;
3832 }

		/*
		 * shouldn't get a 0 return when splitting an extent unless
		 * m_len is 0 (bug) or the extent has been corrupted
		 */
3837 if (unlikely(ret == 0)) {
3838 EXT4_ERROR_INODE(inode,
3839 "unexpected ret == 0, m_len = %u",
3840 map->m_len);
3841 err = -EFSCORRUPTED;
3842 goto out2;
3843 }
3844 map->m_flags |= EXT4_MAP_UNWRITTEN;
3845 goto out;
3846 }

	/* IO end_io complete, convert the filled extent to written */
3848 if (flags & EXT4_GET_BLOCKS_CONVERT) {
3849 err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3850 ppath);
3851 if (err < 0)
3852 goto out2;
3853 ext4_update_inode_fsync_trans(handle, inode, 1);
3854 goto map_out;
3855 }

	/* buffered IO cases */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
3861 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3862 map->m_flags |= EXT4_MAP_UNWRITTEN;
3863 goto map_out;
3864 }
3865
	/* buffered READ or buffered write_begin() lookup */
3867 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
3875 map->m_flags |= EXT4_MAP_UNWRITTEN;
3876 goto out1;
3877 }
3878
	/*
	 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
	 * For buffered writes, at writepage time, etc.  Convert a
	 * discovered unwritten extent to written.
	 */
3884 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3885 if (ret < 0) {
3886 err = ret;
3887 goto out2;
3888 }
3889 ext4_update_inode_fsync_trans(handle, inode, 1);

	/*
	 * shouldn't get a 0 return when converting an unwritten extent
	 * unless m_len is 0 (bug) or the extent has been corrupted
	 */
3894 if (unlikely(ret == 0)) {
3895 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3896 map->m_len);
3897 err = -EFSCORRUPTED;
3898 goto out2;
3899 }
3900
3901out:
3902 allocated = ret;
3903 map->m_flags |= EXT4_MAP_NEW;
3904map_out:
3905 map->m_flags |= EXT4_MAP_MAPPED;
3906out1:
3907 map->m_pblk = newblock;
3908 if (allocated > map->m_len)
3909 allocated = map->m_len;
3910 map->m_len = allocated;
3911 ext4_ext_show_leaf(inode, path);
3912out2:
3913 return err ? err : allocated;
3914}
3915
/*
 * get_implied_cluster_alloc - check to see if the requested
 * allocation (in the map structure) overlaps with a cluster already
 * allocated in an extent.
 *	@sb	The filesystem superblock structure
 *	@map	The requested lblk->pblk mapping
 *	@ex	The extent structure which might contain an implied
 *		cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree.  Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree.  There are three cases we
 * want to catch.  The first is this case:
 *
 *		 |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *  |------ requested region ------|
 *  |================|
 *
 * In each of the above cases, we need to set map->m_pblk and
 * map->m_len so they correspond to the extent labelled as
 * "|====|" from cluster #N, since it is already in use for data in
 * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
 * as a new "allocated" block region.  Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
3957static int get_implied_cluster_alloc(struct super_block *sb,
3958 struct ext4_map_blocks *map,
3959 struct ext4_extent *ex,
3960 struct ext4_ext_path *path)
3961{
3962 struct ext4_sb_info *sbi = EXT4_SB(sb);
3963 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3964 ext4_lblk_t ex_cluster_start, ex_cluster_end;
3965 ext4_lblk_t rr_cluster_start;
3966 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3967 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3968 unsigned short ee_len = ext4_ext_get_actual_len(ex);
3969
	/* The extent passed in that we are trying to match */
3971 ex_cluster_start = EXT4_B2C(sbi, ee_block);
3972 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3973
	/* The requested region passed into ext4_map_blocks() */
3975 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3976
3977 if ((rr_cluster_start == ex_cluster_end) ||
3978 (rr_cluster_start == ex_cluster_start)) {
3979 if (rr_cluster_start == ex_cluster_end)
3980 ee_start += ee_len - 1;
3981 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
3982 map->m_len = min(map->m_len,
3983 (unsigned) sbi->s_cluster_ratio - c_offset);

		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */
3993 if (map->m_lblk < ee_block)
3994 map->m_len = min(map->m_len, ee_block - map->m_lblk);
3995
		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *  |------ requested region ------|
		 *  |================|
		 */
4005 if (map->m_lblk > ee_block) {
4006 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4007 map->m_len = min(map->m_len, next - map->m_lblk);
4008 }
4009
4010 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4011 return 1;
4012 }
4013
4014 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4015 return 0;
4016}
4017
/*
 * Block allocation/map/preallocation routine for extent-based files
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
4037int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4038 struct ext4_map_blocks *map, int flags)
4039{
4040 struct ext4_ext_path *path = NULL;
4041 struct ext4_extent newex, *ex, ex2;
4042 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4043 ext4_fsblk_t newblock = 0, pblk;
4044 int err = 0, depth, ret;
4045 unsigned int allocated = 0, offset = 0;
4046 unsigned int allocated_clusters = 0;
4047 struct ext4_allocation_request ar;
4048 ext4_lblk_t cluster_offset;
4049
4050 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4051 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4052
	/* find extent for this block */
4054 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4055 if (IS_ERR(path)) {
4056 err = PTR_ERR(path);
4057 path = NULL;
4058 goto out;
4059 }
4060
4061 depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_find_extent()
	 */
4068 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4069 EXT4_ERROR_INODE(inode, "bad extent address "
4070 "lblock: %lu, depth: %d pblock %lld",
4071 (unsigned long) map->m_lblk, depth,
4072 path[depth].p_block);
4073 err = -EFSCORRUPTED;
4074 goto out;
4075 }
4076
4077 ex = path[depth].p_ext;
4078 if (ex) {
4079 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4080 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4081 unsigned short ee_len;

		/*
		 * unwritten extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
4088 ee_len = ext4_ext_get_actual_len(ex);
4089
4090 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4091
		/* if found extent covers block, simply return it */
4093 if (in_range(map->m_lblk, ee_block, ee_len)) {
4094 newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
4096 allocated = ee_len - (map->m_lblk - ee_block);
4097 ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4098 map->m_lblk, ee_block, ee_len, newblock);

			/*
			 * If the extent is initialized check whether the
			 * caller wants to convert it to unwritten.
			 */
4104 if ((!ext4_ext_is_unwritten(ex)) &&
4105 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4106 err = convert_initialized_extent(handle,
4107 inode, map, &path, &allocated);
4108 goto out;
4109 } else if (!ext4_ext_is_unwritten(ex)) {
4110 map->m_flags |= EXT4_MAP_MAPPED;
4111 map->m_pblk = newblock;
4112 if (allocated > map->m_len)
4113 allocated = map->m_len;
4114 map->m_len = allocated;
4115 ext4_ext_show_leaf(inode, path);
4116 goto out;
4117 }
4118
4119 ret = ext4_ext_handle_unwritten_extents(
4120 handle, inode, map, &path, flags,
4121 allocated, newblock);
4122 if (ret < 0)
4123 err = ret;
4124 else
4125 allocated = ret;
4126 goto out;
4127 }
4128 }
4129
	/*
	 * requested block isn't allocated yet;
	 * we can't create blocks if the create flag is zero
	 */
4134 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4135 ext4_lblk_t hole_start, hole_len;
4136
4137 hole_start = map->m_lblk;
4138 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);

		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
4143 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4144
		/* Update hole_len to reflect hole size after map->m_lblk */
4146 if (hole_start != map->m_lblk)
4147 hole_len -= map->m_lblk - hole_start;
4148 map->m_pblk = 0;
4149 map->m_len = min_t(unsigned int, map->m_len, hole_len);
4150
4151 goto out;
4152 }
4153
	/*
	 * Okay, we need to do block allocation.
	 */
4157 newex.ee_block = cpu_to_le32(map->m_lblk);
4158 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);

	/*
	 * If we are doing bigalloc, check to see if the extent returned
	 * by ext4_find_extent() implies a cluster we can use.
	 */
4164 if (cluster_offset && ex &&
4165 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4166 ar.len = allocated = map->m_len;
4167 newblock = map->m_pblk;
4168 goto got_allocated_blocks;
4169 }
4170
	/* find neighbour allocated blocks */
4172 ar.lleft = map->m_lblk;
4173 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4174 if (err)
4175 goto out;
4176 ar.lright = map->m_lblk;
4177 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4178 if (err < 0)
4179 goto out;

	/* Check if the extent after searching to the right implies a
	 * cluster we can use. */
4183 if ((sbi->s_cluster_ratio > 1) && err &&
4184 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4185 ar.len = allocated = map->m_len;
4186 newblock = map->m_pblk;
4187 goto got_allocated_blocks;
4188 }

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
	 * EXT_UNWRITTEN_MAX_LEN.
	 */
4196 if (map->m_len > EXT_INIT_MAX_LEN &&
4197 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4198 map->m_len = EXT_INIT_MAX_LEN;
4199 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4200 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4201 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4202
	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4204 newex.ee_len = cpu_to_le16(map->m_len);
4205 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4206 if (err)
4207 allocated = ext4_ext_get_actual_len(&newex);
4208 else
4209 allocated = map->m_len;
4210
	/* allocate new block */
4212 ar.inode = inode;
4213 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4214 ar.logical = map->m_lblk;

	/*
	 * We calculate the offset from the beginning of the cluster
	 * for the logical block number, since when we allocate a
	 * physical cluster, the physical block should start at the
	 * same offset from the beginning of the cluster.  This is
	 * needed so that future calls to get_implied_cluster_alloc()
	 * work correctly.
	 */
4223 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4224 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4225 ar.goal -= offset;
4226 ar.logical -= offset;
4227 if (S_ISREG(inode->i_mode))
4228 ar.flags = EXT4_MB_HINT_DATA;
4229 else
		/* disable in-core preallocation for non-regular files */
4231 ar.flags = 0;
4232 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4233 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4234 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4235 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4236 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4237 ar.flags |= EXT4_MB_USE_RESERVED;
4238 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4239 if (!newblock)
4240 goto out;
4241 allocated_clusters = ar.len;
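	/* convert the allocated cluster count back into a length in blocks */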
4242 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4243 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4244 ar.goal, newblock, ar.len, allocated);
4245 if (ar.len > allocated)
4246 ar.len = allocated;
4247
4248got_allocated_blocks:
	/* try to insert new extent into found leaf and return */
4250 pblk = newblock + offset;
4251 ext4_ext_store_pblock(&newex, pblk);
4252 newex.ee_len = cpu_to_le16(ar.len);
4253
4254 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4255 ext4_ext_mark_unwritten(&newex);
4256 map->m_flags |= EXT4_MAP_UNWRITTEN;
4257 }
4258
4259 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4260 if (err) {
4261 if (allocated_clusters) {
4262 int fb_flags = 0;

			/*
			 * free data blocks we just allocated.
			 * not a good idea to call discard here directly,
			 * but otherwise we'd need to call it every free().
			 */
4269 ext4_discard_preallocations(inode, 0);
4270 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4271 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4272 ext4_free_blocks(handle, inode, NULL, newblock,
4273 EXT4_C2B(sbi, allocated_clusters),
4274 fb_flags);
4275 }
4276 goto out;
4277 }
4278
	/*
	 * Reduce the reserved cluster count to reflect successful deferred
	 * allocation of delayed allocated clusters or direct allocation of
	 * clusters discovered to be delayed allocated.  Once allocated, a
	 * cluster is not included in the reserved count.
	 */
4285 if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
4286 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/*
			 * When allocating delayed allocated clusters, simply
			 * reduce the reserved cluster count and claim quota
			 */
4291 ext4_da_update_reserve_space(inode, allocated_clusters,
4292 1);
4293 } else {
4294 ext4_lblk_t lblk, len;
4295 unsigned int n;

			/*
			 * When allocating non-delayed allocated clusters
			 * (from fallocate, filemap, DIO, or clusters
			 * allocated when delalloc has been disabled by
			 * ext4_nonda_switch), reduce the reserved cluster
			 * count by the number of allocated clusters that
			 * have previously been delayed allocated.  Quota
			 * has been claimed by ext4_mb_new_blocks() above,
			 * so release the quota reservations made for any
			 * previously delayed allocated clusters.
			 */
4308 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4309 len = allocated_clusters << sbi->s_cluster_bits;
4310 n = ext4_es_delayed_clu(inode, lblk, len);
4311 if (n > 0)
4312 ext4_da_update_reserve_space(inode, (int) n, 0);
4313 }
4314 }

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an unwritten extent.
	 */
4320 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4321 ext4_update_inode_fsync_trans(handle, inode, 1);
4322 else
4323 ext4_update_inode_fsync_trans(handle, inode, 0);
4324
4325 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4326 map->m_pblk = pblk;
4327 map->m_len = ar.len;
4328 allocated = map->m_len;
4329 ext4_ext_show_leaf(inode, path);
4330out:
4331 ext4_ext_drop_refs(path);
4332 kfree(path);
4333
4334 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4335 err ? err : allocated);
4336 return err ? err : allocated;
4337}
4338
4339int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4340{
4341 struct super_block *sb = inode->i_sb;
4342 ext4_lblk_t last_block;
4343 int err = 0;
4344
	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
4352 EXT4_I(inode)->i_disksize = inode->i_size;
4353 err = ext4_mark_inode_dirty(handle, inode);
4354 if (err)
4355 return err;
4356
4357 last_block = (inode->i_size + sb->s_blocksize - 1)
4358 >> EXT4_BLOCK_SIZE_BITS(sb);
4359retry:
4360 err = ext4_es_remove_extent(inode, last_block,
4361 EXT_MAX_BLOCKS - last_block);
4362 if (err == -ENOMEM) {
4363 cond_resched();
4364 congestion_wait(BLK_RW_ASYNC, HZ/50);
4365 goto retry;
4366 }
4367 if (err)
4368 return err;
4369retry_remove_space:
4370 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4371 if (err == -ENOMEM) {
4372 cond_resched();
4373 congestion_wait(BLK_RW_ASYNC, HZ/50);
4374 goto retry_remove_space;
4375 }
4376 return err;
4377}
4378
4379static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4380 ext4_lblk_t len, loff_t new_size,
4381 int flags)
4382{
4383 struct inode *inode = file_inode(file);
4384 handle_t *handle;
4385 int ret = 0;
4386 int ret2 = 0, ret3 = 0;
4387 int retries = 0;
4388 int depth = 0;
4389 struct ext4_map_blocks map;
4390 unsigned int credits;
4391 loff_t epos;
4392
4393 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4394 map.m_lblk = offset;
4395 map.m_len = len;

	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
4401 if (len <= EXT_UNWRITTEN_MAX_LEN)
4402 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;

	/*
	 * credits to insert 1 extent into extent tree
	 */
4407 credits = ext4_chunk_trans_blocks(inode, len);
4408 depth = ext_depth(inode);
4409
4410retry:
4411 while (ret >= 0 && len) {
		/*
		 * Recalculate credits when extent tree depth changes.
		 */
4415 if (depth != ext_depth(inode)) {
4416 credits = ext4_chunk_trans_blocks(inode, len);
4417 depth = ext_depth(inode);
4418 }
4419
4420 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4421 credits);
4422 if (IS_ERR(handle)) {
4423 ret = PTR_ERR(handle);
4424 break;
4425 }
4426 ret = ext4_map_blocks(handle, inode, &map, flags);
4427 if (ret <= 0) {
4428 ext4_debug("inode #%lu: block %u: len %u: "
4429 "ext4_ext_map_blocks returned %d",
4430 inode->i_ino, map.m_lblk,
4431 map.m_len, ret);
4432 ext4_mark_inode_dirty(handle, inode);
4433 ret2 = ext4_journal_stop(handle);
4434 break;
4435 }
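		/* advance the mapping past the blocks handled in this pass */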
4436 map.m_lblk += ret;
4437 map.m_len = len = len - ret;
4438 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4439 inode->i_ctime = current_time(inode);
4440 if (new_size) {
4441 if (epos > new_size)
4442 epos = new_size;
4443 if (ext4_update_inode_size(inode, epos) & 0x1)
4444 inode->i_mtime = inode->i_ctime;
4445 }
4446 ret2 = ext4_mark_inode_dirty(handle, inode);
4447 ext4_update_inode_fsync_trans(handle, inode, 1);
4448 ret3 = ext4_journal_stop(handle);
4449 ret2 = ret3 ? ret3 : ret2;
4450 if (unlikely(ret2))
4451 break;
4452 }
4453 if (ret == -ENOSPC &&
4454 ext4_should_retry_alloc(inode->i_sb, &retries)) {
4455 ret = 0;
4456 goto retry;
4457 }
4458
4459 return ret > 0 ? ret2 : ret;
4460}
4461
4462static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
4463
4464static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
4465
4466static long ext4_zero_range(struct file *file, loff_t offset,
4467 loff_t len, int mode)
4468{
4469 struct inode *inode = file_inode(file);
4470 handle_t *handle = NULL;
4471 unsigned int max_blocks;
4472 loff_t new_size = 0;
4473 int ret = 0;
4474 int flags;
4475 int credits;
4476 int partial_begin, partial_end;
4477 loff_t start, end;
4478 ext4_lblk_t lblk;
4479 unsigned int blkbits = inode->i_blkbits;
4480
4481 trace_ext4_zero_range(inode, offset, len, mode);
4482
	/* Call ext4_force_commit to flush all data in case of data=journal. */
4484 if (ext4_should_journal_data(inode)) {
4485 ret = ext4_force_commit(inode->i_sb);
4486 if (ret)
4487 return ret;
4488 }

	/*
	 * Round up offset. This is not fallocate, we need to zero out
	 * blocks, so convert the interior block-aligned part of the range
	 * to unwritten and possibly manually zero out the unaligned parts
	 * of the range.
	 */
4496 start = round_up(offset, 1 << blkbits);
4497 end = round_down((offset + len), 1 << blkbits);
4498
4499 if (start < offset || end > offset + len)
4500 return -EINVAL;
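	/* nonzero when the start/end of the range is not block aligned */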
4501 partial_begin = offset & ((1 << blkbits) - 1);
4502 partial_end = (offset + len) & ((1 << blkbits) - 1);
4503
4504 lblk = start >> blkbits;
4505 max_blocks = (end >> blkbits);
4506 if (max_blocks < lblk)
4507 max_blocks = 0;
4508 else
4509 max_blocks -= lblk;
4510
4511 inode_lock(inode);
4512
	/*
	 * Indirect files do not support unwritten extents
	 */
4516 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4517 ret = -EOPNOTSUPP;
4518 goto out_mutex;
4519 }
4520
4521 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4522 (offset + len > inode->i_size ||
4523 offset + len > EXT4_I(inode)->i_disksize)) {
4524 new_size = offset + len;
4525 ret = inode_newsize_ok(inode, new_size);
4526 if (ret)
4527 goto out_mutex;
4528 }
4529
4530 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4531
	/* Wait all existing dio workers, newcomers will block on i_mutex */
4533 inode_dio_wait(inode);
4534
	/* Preallocate the range including the unaligned edges */
4536 if (partial_begin || partial_end) {
4537 ret = ext4_alloc_file_blocks(file,
4538 round_down(offset, 1 << blkbits) >> blkbits,
4539 (round_up((offset + len), 1 << blkbits) -
4540 round_down(offset, 1 << blkbits)) >> blkbits,
4541 new_size, flags);
4542 if (ret)
4543 goto out_mutex;
4544
4545 }
4546
	/* Zero range excluding the unaligned edges */
4548 if (max_blocks > 0) {
4549 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4550 EXT4_EX_NOCACHE);

		/*
		 * Prevent page faults from reinstantiating pages we have
		 * released from page cache.
		 */
4556 down_write(&EXT4_I(inode)->i_mmap_sem);
4557
4558 ret = ext4_break_layouts(inode);
4559 if (ret) {
4560 up_write(&EXT4_I(inode)->i_mmap_sem);
4561 goto out_mutex;
4562 }
4563
4564 ret = ext4_update_disksize_before_punch(inode, offset, len);
4565 if (ret) {
4566 up_write(&EXT4_I(inode)->i_mmap_sem);
4567 goto out_mutex;
4568 }
4569
4570 truncate_pagecache_range(inode, start, end - 1);
4571 inode->i_mtime = inode->i_ctime = current_time(inode);
4572
4573 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4574 flags);
4575 up_write(&EXT4_I(inode)->i_mmap_sem);
4576 if (ret)
4577 goto out_mutex;
4578 }
4579 if (!partial_begin && !partial_end)
4580 goto out_mutex;
4581
	/*
	 * In worst case we have to writeout two nonadjacent unwritten
	 * blocks and update the inode
	 */
4586 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4587 if (ext4_should_journal_data(inode))
4588 credits += 2;
4589 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4590 if (IS_ERR(handle)) {
4591 ret = PTR_ERR(handle);
4592 ext4_std_error(inode->i_sb, ret);
4593 goto out_mutex;
4594 }
4595
4596 inode->i_mtime = inode->i_ctime = current_time(inode);
4597 if (new_size)
4598 ext4_update_inode_size(inode, new_size);
4599 ret = ext4_mark_inode_dirty(handle, inode);
4600 if (unlikely(ret))
4601 goto out_handle;
4602 ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
4603 (offset + len - 1) >> inode->i_sb->s_blocksize_bits);
4604
4605 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4606 if (ret >= 0)
4607 ext4_update_inode_fsync_trans(handle, inode, 1);
4608
4609 if (file->f_flags & O_SYNC)
4610 ext4_handle_sync(handle);
4611
4612out_handle:
4613 ext4_journal_stop(handle);
4614out_mutex:
4615 inode_unlock(inode);
4616 return ret;
4617}
4618
/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
4626long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4627{
4628 struct inode *inode = file_inode(file);
4629 loff_t new_size = 0;
4630 unsigned int max_blocks;
4631 int ret = 0;
4632 int flags;
4633 ext4_lblk_t lblk;
4634 unsigned int blkbits = inode->i_blkbits;
4635
	/*
	 * Encrypted inodes can't handle collapse range or insert
	 * range since we would need to re-encrypt blocks with a
	 * different IV or XTS tweak (which are based on the logical
	 * block number).
	 */
4642 if (IS_ENCRYPTED(inode) &&
4643 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4644 return -EOPNOTSUPP;
4645
	/* Return error if mode is not supported */
4647 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4648 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4649 FALLOC_FL_INSERT_RANGE))
4650 return -EOPNOTSUPP;
4651
4652 ext4_fc_start_update(inode);
4653
4654 if (mode & FALLOC_FL_PUNCH_HOLE) {
4655 ret = ext4_punch_hole(inode, offset, len);
4656 goto exit;
4657 }
4658
4659 ret = ext4_convert_inline_data(inode);
4660 if (ret)
4661 goto exit;
4662
4663 if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4664 ret = ext4_collapse_range(inode, offset, len);
4665 goto exit;
4666 }
4667
4668 if (mode & FALLOC_FL_INSERT_RANGE) {
4669 ret = ext4_insert_range(inode, offset, len);
4670 goto exit;
4671 }
4672
4673 if (mode & FALLOC_FL_ZERO_RANGE) {
4674 ret = ext4_zero_range(file, offset, len, mode);
4675 goto exit;
4676 }
4677 trace_ext4_fallocate_enter(inode, offset, len, mode);
4678 lblk = offset >> blkbits;
4679
4680 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4681 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4682
4683 inode_lock(inode);
4684
	/*
	 * We only support preallocation for extent-based files
	 */
4688 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4689 ret = -EOPNOTSUPP;
4690 goto out;
4691 }
4692
4693 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4694 (offset + len > inode->i_size ||
4695 offset + len > EXT4_I(inode)->i_disksize)) {
4696 new_size = offset + len;
4697 ret = inode_newsize_ok(inode, new_size);
4698 if (ret)
4699 goto out;
4700 }
4701
	/* Wait all existing dio workers, newcomers will block on i_mutex */
4703 inode_dio_wait(inode);
4704
4705 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4706 if (ret)
4707 goto out;
4708
4709 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4710 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4711 EXT4_I(inode)->i_sync_tid);
4712 }
4713out:
4714 inode_unlock(inode);
4715 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4716exit:
4717 ext4_fc_stop_update(inode);
4718 return ret;
4719}
4720
/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size.
 * All unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
4731int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4732 loff_t offset, ssize_t len)
4733{
4734 unsigned int max_blocks;
4735 int ret = 0, ret2 = 0, ret3 = 0;
4736 struct ext4_map_blocks map;
4737 unsigned int blkbits = inode->i_blkbits;
4738 unsigned int credits = 0;
4739
4740 map.m_lblk = offset >> blkbits;
4741 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4742
4743 if (!handle) {
		/*
		 * credits to insert 1 extent into extent tree
		 */
4747 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4748 }
4749 while (ret >= 0 && ret < max_blocks) {
4750 map.m_lblk += ret;
4751 map.m_len = (max_blocks -= ret);
4752 if (credits) {
4753 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4754 credits);
4755 if (IS_ERR(handle)) {
4756 ret = PTR_ERR(handle);
4757 break;
4758 }
4759 }
4760 ret = ext4_map_blocks(handle, inode, &map,
4761 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4762 if (ret <= 0)
4763 ext4_warning(inode->i_sb,
4764 "inode #%lu: block %u: len %u: "
4765 "ext4_ext_map_blocks returned %d",
4766 inode->i_ino, map.m_lblk,
4767 map.m_len, ret);
4768 ret2 = ext4_mark_inode_dirty(handle, inode);
4769 if (credits) {
4770 ret3 = ext4_journal_stop(handle);
4771 if (unlikely(ret3))
4772 ret2 = ret3;
4773 }
4774
4775 if (ret <= 0 || ret2)
4776 break;
4777 }
4778 return ret > 0 ? ret2 : ret;
4779}
4780
4781int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4782{
4783 int ret = 0, err = 0;
4784 struct ext4_io_end_vec *io_end_vec;
4785
	/*
	 * This is somewhat ugly but the idea is clear: When the transaction is
	 * reserved, everything goes into it. Otherwise we rather start several
	 * smaller transactions for conversion of each extent separately.
	 */
4791 if (handle) {
4792 handle = ext4_journal_start_reserved(handle,
4793 EXT4_HT_EXT_CONVERT);
4794 if (IS_ERR(handle))
4795 return PTR_ERR(handle);
4796 }
4797
4798 list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4799 ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4800 io_end_vec->offset,
4801 io_end_vec->size);
4802 if (ret)
4803 break;
4804 }
4805
4806 if (handle)
4807 err = ext4_journal_stop(handle);
4808
4809 return ret < 0 ? ret : err;
4810}
4811
4812static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4813{
4814 __u64 physical = 0;
4815 __u64 length = 0;
4816 int blockbits = inode->i_sb->s_blocksize_bits;
4817 int error = 0;
4818 u16 iomap_type;
4819
	/* in-inode? */
4821 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4822 struct ext4_iloc iloc;
4823 int offset;
4824
4825 error = ext4_get_inode_loc(inode, &iloc);
4826 if (error)
4827 return error;
4828 physical = (__u64)iloc.bh->b_blocknr << blockbits;
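		/* in-inode xattrs follow the fixed inode body plus i_extra_isize */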
4829 offset = EXT4_GOOD_OLD_INODE_SIZE +
4830 EXT4_I(inode)->i_extra_isize;
4831 physical += offset;
4832 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4833 brelse(iloc.bh);
4834 iomap_type = IOMAP_INLINE;
4835 } else if (EXT4_I(inode)->i_file_acl) {
4836 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4837 length = inode->i_sb->s_blocksize;
4838 iomap_type = IOMAP_MAPPED;
4839 } else {
		/* no in-inode xattrs and no external xattr block */
4841 error = -ENOENT;
4842 goto out;
4843 }
4844
4845 iomap->addr = physical;
4846 iomap->offset = 0;
4847 iomap->length = length;
4848 iomap->type = iomap_type;
4849 iomap->flags = 0;
4850out:
4851 return error;
4852}

static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	int error;

	error = ext4_iomap_xattr_fiemap(inode, iomap);
	if (error == 0 && (offset >= iomap->length))
		error = -ENOENT;
	return error;
}

static const struct iomap_ops ext4_iomap_xattr_ops = {
	.iomap_begin = ext4_iomap_xattr_begin,
};

static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
{
	u64 maxbytes;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		maxbytes = inode->i_sb->s_maxbytes;
	else
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;

	if (*len == 0)
		return -EINVAL;
	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink the request scope to what the fs can actually handle.
	 */
	if (*len > maxbytes || (maxbytes - *len) < start)
		*len = maxbytes - start;
	return 0;
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	int error = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
	}

	/*
	 * For bitmap-mapped files the maximum size limit can be smaller
	 * than s_maxbytes, so check len here manually instead of relying
	 * only on the generic check.
	 */
	error = ext4_fiemap_check_ranges(inode, start, &len);
	if (error)
		return error;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
		return iomap_fiemap(inode, fieinfo, start, len,
				    &ext4_iomap_xattr_ops);
	}

	return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
}

int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
		      __u64 start, __u64 len)
{
	ext4_lblk_t start_blk, len_blks;
	__u64 last_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline;

		down_read(&EXT4_I(inode)->xattr_sem);
		has_inline = ext4_has_inline_data(inode);
		up_read(&EXT4_I(inode)->xattr_sem);
		if (has_inline)
			return 0;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
	}

	error = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (error)
		return error;

	error = ext4_fiemap_check_ranges(inode, start, &len);
	if (error)
		return error;

	start_blk = start >> inode->i_sb->s_blocksize_bits;
	last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
	if (last_blk >= EXT_MAX_BLOCKS)
		last_blk = EXT_MAX_BLOCKS-1;
	len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

	/*
	 * Walk the extent status tree, gathering the cached extent
	 * information in the requested range and pushing it back to the
	 * caller.
	 */
	return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
}
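
/*
 * ext4_access_path:
 * Function to access the path buffer for marking it dirty.
 * It also checks if there are sufficient credits left in the journal handle
 * to update the path.
 */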
static int
ext4_access_path(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path)
{
	int credits, err;

	if (!ext4_handle_valid(handle))
		return 0;

	/*
	 * Check if we need to extend the journal credits; if the handle is
	 * running low, restart it with enough credits to update the whole
	 * path (ext4_datasem_ensure_credits() drops and retakes i_data_sem
	 * around the restart).
	 */
	credits = ext4_writepage_trans_blocks(inode);
	err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
	if (err < 0)
		return err;

	err = ext4_ext_get_access(handle, inode, path);
	return err;
}
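
/*
 * ext4_ext_shift_path_extents:
 * Shift the extents of the path structure lying between path[depth].p_ext
 * and EXT_LAST_EXTENT(path[depth].p_hdr) by @shift blocks. @SHIFT tells
 * whether this is a right or a left shift operation.
 */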
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
			    struct inode *inode, handle_t *handle,
			    enum SHIFT_DIRECTION SHIFT)
{
	int depth, err = 0;
	struct ext4_extent *ex_start, *ex_last;
	bool update = false;
	depth = path->p_depth;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			ex_start = path[depth].p_ext;
			if (!ex_start)
				return -EFSCORRUPTED;

			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);

			err = ext4_access_path(handle, inode, path + depth);
			if (err)
				goto out;

			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
				update = true;

			while (ex_start <= ex_last) {
				if (SHIFT == SHIFT_LEFT) {
					le32_add_cpu(&ex_start->ee_block,
						     -shift);
					/* Try to merge to the left. */
					if ((ex_start >
					     EXT_FIRST_EXTENT(path[depth].p_hdr))
					    &&
					    ext4_ext_try_to_merge_right(inode,
							path, ex_start - 1))
						ex_last--;
					else
						ex_start++;
				} else {
					le32_add_cpu(&ex_last->ee_block, shift);
					ext4_ext_try_to_merge_right(inode, path,
								    ex_last);
					ex_last--;
				}
			}
			err = ext4_ext_dirty(handle, inode, path + depth);
			if (err)
				goto out;

			if (--depth < 0 || !update)
				break;
		}

		/* Update index too */
		err = ext4_access_path(handle, inode, path + depth);
		if (err)
			goto out;

		if (SHIFT == SHIFT_LEFT)
			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
		else
			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/* we are done if current index is not a starting index */
		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
			break;

		depth--;
	}

out:
	return err;
}
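
/*
 * ext4_ext_shift_extents:
 * All the extents which lie in the range from @start to the last allocated
 * block for the @inode are shifted either towards left or right (depending
 * upon @SHIFT) by @shift blocks.
 * On success, 0 is returned, error otherwise.
 */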
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
		       ext4_lblk_t start, ext4_lblk_t shift,
		       enum SHIFT_DIRECTION SHIFT)
{
	struct ext4_ext_path *path;
	int ret = 0, depth;
	struct ext4_extent *extent;
	ext4_lblk_t stop, *iterator, ex_start, ex_end;

	/* Let path point to the last extent */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = path->p_depth;
	extent = path[depth].p_ext;
	if (!extent)
		goto out;

	stop = le32_to_cpu(extent->ee_block);

	/*
	 * For left shifts, make sure the hole on the left is big enough to
	 * accommodate the shift. For right shifts, make sure the last extent
	 * won't be shifted beyond EXT_MAX_BLOCKS.
	 */
	if (SHIFT == SHIFT_LEFT) {
		path = ext4_find_extent(inode, start - 1, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (extent) {
			ex_start = le32_to_cpu(extent->ee_block);
			ex_end = le32_to_cpu(extent->ee_block) +
				ext4_ext_get_actual_len(extent);
		} else {
			ex_start = 0;
			ex_end = 0;
		}

		if ((start == ex_start && shift > ex_start) ||
		    (shift > start - ex_end)) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (shift > EXT_MAX_BLOCKS -
		    (stop + ext4_ext_get_actual_len(extent))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * In case of left shift, iterator points to start and it is increased
	 * till we reach stop. In case of right shift, iterator points to stop
	 * and it is decreased till we reach start.
	 */
	if (SHIFT == SHIFT_LEFT)
		iterator = &start;
	else
		iterator = &stop;

	/*
	 * It is safe to start updating extents. Start and stop are unsigned,
	 * so in case of right shift, if an extent with block 0 is reached,
	 * the iterator becomes NULL to indicate the end of the loop.
	 */
	while (iterator && start <= stop) {
		path = ext4_find_extent(inode, *iterator, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (!extent) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) *iterator);
			return -EFSCORRUPTED;
		}
		if (SHIFT == SHIFT_LEFT && *iterator >
		    le32_to_cpu(extent->ee_block)) {
			/* Hole, move to the next extent */
			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
				path[depth].p_ext++;
			} else {
				*iterator = ext4_ext_next_allocated_block(path);
				continue;
			}
		}

		if (SHIFT == SHIFT_LEFT) {
			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
			*iterator = le32_to_cpu(extent->ee_block) +
					ext4_ext_get_actual_len(extent);
		} else {
			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
			if (le32_to_cpu(extent->ee_block) > 0)
				*iterator = le32_to_cpu(extent->ee_block) - 1;
			else
				/* Beginning is reached, end of the loop */
				iterator = NULL;

			/* Update path extent in case we need to stop */
			while (le32_to_cpu(extent->ee_block) < start)
				extent++;
			path[depth].p_ext = extent;
		}
		ret = ext4_ext_shift_path_extents(path, shift, inode,
						  handle, SHIFT);
		if (ret)
			break;
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
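
/*
 * ext4_collapse_range:
 * This implements the fallocate's collapse range functionality for ext4.
 * Returns: 0 on success, and a non-zero error code otherwise.
 */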
static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t punch_start, punch_stop;
	handle_t *handle;
	unsigned int credits;
	loff_t new_size, ioffset;
	int ret;

	/*
	 * We need to test this early because xfstests assumes that a
	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support collapse range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Collapse range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;

	trace_ext4_collapse_range(inode, offset, len);

	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/*
	 * There is no need to overlap collapse range with EOF, in which case
	 * it is effectively a truncate operation.
	 */
	if (offset + len >= inode->i_size) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released
	 * from the page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_mmap;

	/*
	 * Need to round down offset to be aligned with page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/*
	 * Write tail of the last page before removed range since it will get
	 * removed from the page cache below.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
	if (ret)
		goto out_mmap;
	/*
	 * Write data that will be shifted to preserve them when discarding
	 * page cache below. We are also protected from pages becoming dirty
	 * by i_mmap_sem.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode, 0);

	ret = ext4_es_remove_extent(inode, punch_start,
				    EXT_MAX_BLOCKS - punch_start);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}
	ext4_discard_preallocations(inode, 0);

	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
				     punch_stop - punch_start, SHIFT_LEFT);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	new_size = inode->i_size - len;
	i_size_write(inode, new_size);
	EXT4_I(inode)->i_disksize = new_size;

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
	ext4_fc_stop_ineligible(sb);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}
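
/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks starting from @offset to the EOF are shifted towards
 * the right by @len bytes to create a hole in the @inode. The inode size
 * is increased by @len bytes.
 * Returns 0 on success, error otherwise.
 */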
static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret = 0, depth, split_flag = 0;
	loff_t ioffset;

	/*
	 * We need to test this early because xfstests assumes that an
	 * insert range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support insert range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Insert range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;

	trace_ext4_insert_range(inode, offset, len);

	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Check whether the maximum file size would be exceeded */
	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
		ret = -EFBIG;
		goto out_mutex;
	}

	/* Offset must be less than i_size */
	if (offset >= inode->i_size) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released
	 * from the page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_mmap;

	/*
	 * Need to round down to align start offset to page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);

	/* Expand file to avoid data loss if there is error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode, 0);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of extent, split
		 * the extent @offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
		    (offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}

		ext4_ext_drop_refs(path);
		kfree(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	ret = ext4_es_remove_extent(inode, offset_lblk,
				    EXT_MAX_BLOCKS - offset_lblk);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	/*
	 * if offset_lblk lies in a hole which is at start of file, use
	 * ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
		len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
	ext4_fc_stop_ineligible(sb);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}
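
/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1: First inode
 * @inode2: Second inode
 * @lblk1: Start block for first inode
 * @lblk2: Start block for second inode
 * @count: Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp: Pointer to save error value
 *
 * This helper routine does exactly what its name promises: it swaps the
 * extents between the two inodes. All other details, such as page cache
 * locking consistency, bh mapping consistency or extent data copying, must
 * be handled by the caller.
 * Locking:
 *	i_rwsem is held for both inodes
 *	i_data_sem is locked for write for both inodes
 * Assumptions:
 *	All pages from the requested range are locked for both inodes
 */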
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
							  &path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
							  &path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/*
		 * ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated.
		 */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
							  &path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
							  &path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/*
		 * ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated.
		 */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);

		/*
		 * Looks scary, but the second inode already points to the new
		 * blocks and was successfully dirtied. Luckily an error here
		 * can only happen due to a journal abort, so we are safe.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
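
/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it. Otherwise,
 * returns 0. Can also return negative error codes.
 */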
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty. This situation is possible,
	 * though, _during_ tree modification, and it's why an assertion
	 * can't be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * The extent returned by the lookup either contains the cluster of
	 * interest, lies before it, or lies after it. If it lies before it,
	 * the next allocated block may still start in the target cluster,
	 * so check that too.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_ext_drop_refs(path);
	kfree(path);

	return err ? err : mapped;
}
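
/*
 * Updates physical block address and unwritten status of extent
 * starting at lblk start and of len. If such an extent doesn't exist,
 * this function splits the extent tree appropriately to create an
 * extent like this. This function is called in the fast commit
 * replay path. Returns 0 on success and error on failure.
 */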
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path = NULL, *ppath;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
		ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		ppath = path;
		down_write(&EXT4_I(inode)->i_data_sem);
		ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (ret)
			goto out;
		kfree(path);
		path = ext4_find_extent(inode, start, NULL, 0);
		if (IS_ERR(path))
			return -EINVAL;
		ppath = path;
		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);
		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_force_split_extent_at(NULL, inode, &ppath,
							 start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (ret)
				goto out;
			kfree(path);
			path = ext4_find_extent(inode, start, NULL, 0);
			if (IS_ERR(path))
				return -EINVAL;
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}
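
/* Try to shrink the extent tree */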
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_ext_drop_refs(path);
			kfree(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}
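
/* Check if *cur is a hole and if it is, skip it */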
static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret != 0)
		return;
	*cur = *cur + map.m_len;
}
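
/* Count the number of blocks used by this inode and update i_blocks */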
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_ext_drop_refs(path);
		kfree(path);
		goto out;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_ext_drop_refs(path);
	kfree(path);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}
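
	/*
	 * Count extent tree blocks. We walk the path for each extent from
	 * left to right and count a tree block only when it differs from
	 * the corresponding block on the previous extent's path, so shared
	 * index blocks are not counted twice.
	 */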
	cur = 0;
	skip_hole(inode, &cur);
	path = ext4_find_extent(inode, cur, NULL, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	ext4_ext_drop_refs(path);
	kfree(path);
	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_ext_drop_refs(path);
			kfree(path);
			return 0;
		}
		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
			  ext4_ext_get_actual_len(ex));
		skip_hole(inode, &cur);

		path2 = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path2)) {
			ext4_ext_drop_refs(path);
			kfree(path);
			break;
		}
		ex = path2[path2->p_depth].p_ext;
		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
		ext4_ext_drop_refs(path);
		ext4_ext_drop_refs(path2);
		kfree(path);
		kfree(path2);
	}

out:
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
	return 0;
}
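
/*
 * Clear the block bitmap bits for all blocks referenced by this inode,
 * including the blocks of the extent tree itself; used by the fast commit
 * replay path.
 */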
int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_ext_drop_refs(path);
		kfree(path);
		return 0;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_ext_drop_refs(path);
	kfree(path);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
			if (!IS_ERR_OR_NULL(path)) {
				for (j = 0; j < path->p_depth; j++) {
					/* clear the extent tree blocks */
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, 0);
				}
				ext4_ext_drop_refs(path);
				kfree(path);
			}
			/* clear the data blocks for this mapping */
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
		}
		cur = cur + map.m_len;
	}

	return 0;
}