// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 */
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * Used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * the transaction with 'restart_cred' credits and 'revoke_cred' revoke
 * records. The function drops i_data_sem when restarting the transaction
 * and reacquires it after the transaction is restarted.
 *
 * Returns 0 on success, 1 if the transaction had to be restarted,
 * and < 0 in case of a fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
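
/*
 * Usage sketch (illustrative; it mirrors the pattern used by callers such
 * as ext4_ext_rm_leaf() later in this file): the caller loops over extent
 * tree work under i_data_sem and tops up journal credits before each step.
 * A positive return means the transaction was restarted and i_data_sem was
 * temporarily dropped, so the extent path must be revalidated:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, credits,
 *					  credits, revoke_credits);
 *	if (err) {
 *		if (err > 0)
 *			err = -EAGAIN;	(tell the caller to retry)
 *		goto out;
 *	}
 */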

/*
 * Could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * Could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or "which filesystem is
		 * this" logic that we consult when grouping
		 * allocation blocks.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
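
/*
 * Worked example for the goal heuristic above (numbers are illustrative):
 * if the closest extent in the path maps logical block 100 to physical
 * block 5000 and we are allocating for logical block 103, the goal is
 * 5000 + (103 - 100) = 5003; for logical block 97 it is
 * 5000 - (100 - 97) = 4997.  Either way the allocator is steered toward
 * physical contiguity with the nearest mapped neighbour.
 */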

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
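
/*
 * Capacity arithmetic for the helpers above, assuming a 4KiB block size
 * and the usual 12-byte on-disk sizes of struct ext4_extent_header,
 * struct ext4_extent and struct ext4_extent_idx:
 *
 *	tree block: (4096 - 12) / 12 = 340 extents or indexes
 *	root (60-byte i_data): (60 - 12) / 12 = 4 extents or indexes
 *
 * A depth-1 tree can therefore map 4 * 340 = 1360 extents, and every
 * additional level multiplies that by 340.  (AGGRESSIVE_TEST shrinks
 * these limits so the tree-manipulation paths get exercised even by
 * small files.)
 */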

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow of the logical start + length
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}
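
/*
 * The (lblock + len <= lblock) test above rejects both zero-length
 * extents and logical wraparound in one comparison.  Example: with
 * 32-bit ext4_lblk_t, lblock = 0xfffffff0 and len = 0x20 wraps to
 * 0x10, which is <= lblock, so the extent is rejected instead of
 * silently mapping blocks that precede its own start.
 */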

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_fsblk_t *pblk, int depth)
{
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;

		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
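
/*
 * Illustration of the hole caching above: for a leaf holding extents
 * [0..9] and [20..29], the loop caches [0..9] as written, then, since
 * the next extent starts at 20 while prev is 10, caches [10..19] as a
 * hole before caching [20..29].  Subsequent lookups in the gap can be
 * answered from the extent status tree without rereading the leaf.
 */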

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode,
			       ext_block_hdr(bh), depth, pblk);
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;

		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
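
/*
 * Worked example of the search above: for a leaf with extents starting
 * at logical blocks {0, 100, 200, 300} and a target block of 250, the
 * loop runs over entries 1..3 and terminates with l pointing at the
 * extent starting at 300, so p_ext = l - 1 selects the extent starting
 * at 200 -- the rightmost extent whose start is <= 250.  The same
 * invariant holds for index entries in ext4_ext_binsearch_idx().
 */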

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
				gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right part of the tree)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;

		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + it's physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * Search the closest allocated block to the right for *logical
 * and returns it at @logical + it's physical address at @phys.
 * If not exists, return 0 and @phys is set to 0. We will return
 * 1 which means we found an allocated block and ret_ex is valid.
 * Or return a (< 0) error code.
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, block,
					    path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be verified
 * by the caller.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			/* leaf */
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			/* index */
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int ext4_can_extents_be_merged(struct inode *inode,
				      struct ext4_extent *ex1,
				      struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;

	if (ext4_ext_is_unwritten(ex1) &&
	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
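
/*
 * Example of the merge rules above: a written extent mapping logical
 * blocks 100..149 to physical blocks 5000..5049 can merge with one
 * mapping 150..159 to 5050..5059 (same written state, logically and
 * physically contiguous, combined length 60 <= EXT_INIT_MAX_LEN).  It
 * cannot merge if the second extent is unwritten, starts at 151, or
 * maps to physical 6000; and two unwritten extents must keep their
 * combined length within EXT_UNWRITTEN_MAX_LEN so the high bit of
 * ee_len remains available as the unwritten marker.
 */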

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree,
 * then tries to collapse the extent tree into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
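
/*
 * Example of the trimming above (non-bigalloc, so the cluster masks are
 * no-ops): if newext covers logical blocks 100..199 but an existing
 * extent already starts at block 150, then b2 = 150 and b1 + len1 > b2,
 * so newext is shortened to 100..149 and 1 is returned; the caller then
 * inserts only the non-overlapping front portion.
 */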

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in no-leaf case
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath,
			   struct ext4_extent *newext, int gb_flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	int mb_flags = 0, unwritten;

	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EFSCORRUPTED;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}

	/* try to insert block into found extent and return */
	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		/* Try to append newex to the ex */
		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;
			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		/* Try to prepend newex to the ex */
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug(inode, "next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_find_extent(inode, next, NULL, gb_flags);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug(inode, "next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug(inode, "next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		mb_flags |= EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
				       ppath, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_unwritten(newext),
			  ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug(inode, "insert %u:%llu:[%d]%d before: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug(inode, "insert %u:%llu:[%d]%d after: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug(inode, "insert %u:%llu:[%d]%d: "
				  "move %d extents from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	ext4_ext_drop_refs(npath);
	kfree(npath);
	return err;
}

static int ext4_fill_es_cache_info(struct inode *inode,
				   ext4_lblk_t block, ext4_lblk_t num,
				   struct fiemap_extent_info *fieinfo)
{
	ext4_lblk_t next, end = block + num - 1;
	struct extent_status es;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned int flags;
	int err;

	while (block <= end) {
		next = 0;
		flags = 0;
		if (!ext4_es_lookup_extent(inode, block, &next, &es))
			break;
		if (ext4_es_is_unwritten(&es))
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (ext4_es_is_delayed(&es))
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		if (ext4_es_is_hole(&es))
			flags |= EXT4_FIEMAP_EXTENT_HOLE;
		if (next == 0)
			flags |= FIEMAP_EXTENT_LAST;
		if (flags & (FIEMAP_EXTENT_DELALLOC|
			     EXT4_FIEMAP_EXTENT_HOLE))
			es.es_pblk = 0;
		else
			es.es_pblk = ext4_es_pblock(&es);
		err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
		if (next == 0)
			break;
		block = next;
		if (err < 0)
			return err;
		if (err == 1)
			return 0;
	}
	return 0;
}

/*
 * ext4_ext_determine_hole - determine hole around given block
 * @inode:	inode we lookup in
 * @path:	path in extent tree to @lblk
 * @lblk:	pointer to logical block around which we want to determine hole
 *
 * Determine hole length (and start if easily possible) around given logical
 * block. We don't try too hard to find the beginning of the hole but @path
 * actually points to extent before @lblk, we provide it.
 *
 * The function returns the length of a hole starting at @lblk. We update @lblk
 * to the beginning of the hole if we managed to find it.
 */
static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
					   struct ext4_ext_path *path,
					   ext4_lblk_t *lblk)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	ext4_lblk_t len;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		*lblk = 0;
		len = EXT_MAX_BLOCKS;
	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - *lblk;
	} else if (*lblk >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;

		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		BUG_ON(next == *lblk);
		len = next - *lblk;
	} else {
		BUG();
	}
	return len;
}
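
/*
 * Example for the hole computation above: with extents [100..199] and
 * [500..599], a lookup of block 250 takes the third branch, *lblk is
 * advanced to 200 and the next allocated block is 500, so a hole of
 * 300 blocks at [200..499] is reported.  A lookup of block 50 takes
 * the second branch and reports a hole of 100 - 50 = 50 blocks that
 * starts at 50 (with *lblk unchanged).
 */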

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
			  ext4_lblk_t hole_len)
{
	struct extent_status es;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
				  hole_start + hole_len - 1, &es);
	if (es.es_len) {
		/* There's delayed extent containing lblock? */
		if (es.es_lblk <= hole_start)
			return;
		hole_len = min(es.es_lblk - hole_start, hole_len);
	}
	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
			      EXTENT_STATUS_HOLE);
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EFSCORRUPTED;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;

		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns max. credits that needed to insert an extent
 * to the extent tree.
 * When pass the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There are some space in the leaf tree, no
			 *  need to account for leaf block credit
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to change/allocate to add @extents extents?
 *
 * If we add a single extent, then in the worst case, each tree level
 * index/leaf need to be changed in case of the tree split.
 *
 * If more extents are inserted, they could cause the whole tree split more
 * than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
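
/*
 * Example of the estimate above: for a depth-2 tree, inserting a single
 * extent is charged 2 * 2 = 4 index/leaf blocks (each level may be
 * modified once if the tree splits), while inserting several extents is
 * charged 2 * 3 = 6 to allow for the tree splitting more than once.
 * These are worst-case journal credit estimates, not exact counts.
 */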

static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}

/*
 * ext4_rereserve_cluster - increment the reserved cluster count when
 *                          freeing a cluster with a pending reservation
 *
 * @inode - file containing the cluster
 * @lblk - logical block in cluster to be reserved
 *
 * Increments the reserved cluster count and adjusts quota in a bigalloc
 * file system when freeing a partial cluster containing at least one
 * delayed and unwritten block.  A partial cluster meeting that
 * requirement will have a pending reservation.  If so, the
 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
 * defer reserved and allocated space accounting to a subsequent call
 * to this function.
 */
static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));

	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
	spin_unlock(&ei->i_block_reservation_lock);

	percpu_counter_add(&sbi->s_freeclusters_counter, 1);
	ext4_remove_pending(inode, lblk);
}
2406
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      struct partial_cluster *partial,
			      ext4_lblk_t from, ext4_lblk_t to)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ext4_fsblk_t last_pblk, pblk;
	ext4_lblk_t num;
	int flags;

	/* only extent tail removal is allowed */
	if (from < le32_to_cpu(ex->ee_block) ||
	    to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
		ext4_error(sbi->s_sb,
			   "strange request: removal(2) %u-%u from %u:%u",
			   from, to, le32_to_cpu(ex->ee_block), ee_len);
		return 0;
	}

#ifdef EXTENTS_STATS
	spin_lock(&sbi->s_ext_stats_lock);
	sbi->s_ext_blocks += ee_len;
	sbi->s_ext_extents++;
	if (ee_len < sbi->s_ext_min)
		sbi->s_ext_min = ee_len;
	if (ee_len > sbi->s_ext_max)
		sbi->s_ext_max = ee_len;
	if (ext_depth(inode) > sbi->s_depth_max)
		sbi->s_depth_max = ext_depth(inode);
	spin_unlock(&sbi->s_ext_stats_lock);
#endif

	trace_ext4_remove_blocks(inode, ex, from, to, partial);

	/*
	 * if we have a partial cluster, and it's different from the
	 * cluster of the last block in the extent, we free it
	 */
	last_pblk = ext4_ext_pblock(ex) + ee_len - 1;

	if (partial->state != initial &&
	    partial->pclu != EXT4_B2C(sbi, last_pblk)) {
		if (partial->state == tofree) {
			flags = get_default_free_blocks_flags(inode);
			if (ext4_is_pending(inode, partial->lblk))
				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
			ext4_free_blocks(handle, inode, NULL,
					 EXT4_C2B(sbi, partial->pclu),
					 sbi->s_cluster_ratio, flags);
			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
				ext4_rereserve_cluster(inode, partial->lblk);
		}
		partial->state = initial;
	}

	num = le32_to_cpu(ex->ee_block) + ee_len - from;
	pblk = ext4_ext_pblock(ex) + ee_len - num;

	/*
	 * We free the partial cluster at the end of the extent (if any),
	 * unless the cluster is used by another extent (partial_cluster
	 * state is nofree).  If a partial cluster exists here, it must be
	 * shared with the last block in the extent.
	 */
	flags = get_default_free_blocks_flags(inode);

	/* partial, left end cluster aligned, right end unaligned */
	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
	    (partial->state != nofree)) {
		if (ext4_is_pending(inode, to))
			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_PBLK_CMASK(sbi, last_pblk),
				 sbi->s_cluster_ratio, flags);
		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
			ext4_rereserve_cluster(inode, to);
		partial->state = initial;
		flags = get_default_free_blocks_flags(inode);
	}

	flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
	ext4_free_blocks(handle, inode, NULL, pblk, num, flags);

	/* reset the partial cluster if we've freed past it */
	if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
		partial->state = initial;

	/*
	 * If we've freed the entire extent but the beginning is not left
	 * cluster aligned and is not marked as ineligible for freeing we
	 * record the partial cluster at the beginning of the extent.  It
	 * wasn't freed by the preceding ext4_free_blocks() call, and we
	 * need to look farther to the left to determine if it's to be freed
	 * (not shared with another extent). Else, reset the partial
	 * cluster - we're either done freeing or the beginning of the
	 * extent is left cluster aligned.
	 */
	if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
		if (partial->state == initial) {
			partial->pclu = EXT4_B2C(sbi, pblk);
			partial->lblk = from;
			partial->state = tofree;
		}
	} else {
		partial->state = initial;
	}

	return 0;
}

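/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end".  Both "start"
 * and "end" must appear in the same extent or EIO is returned.
 *
 * @handle:  The journal handle
 * @inode:   The files inode
 * @path:    The path to the leaf
 * @partial: The cluster which we'll have to free if all extents
 *           in the leaf are freed
 * @start:   The first block to remove
 * @end:     The last block to remove
 */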
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path,
		 struct partial_cluster *partial,
		 ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits, revoke_credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned unwritten = 0;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}
	/* find where to start removing */
	ex = path[depth].p_ext;
	if (!ex)
		ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, partial);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_unwritten(ex))
			unwritten = 1;
		else
			unwritten = 0;

		ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
			  unwritten, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug(inode, "  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so note that its first cluster is in use to avoid
			 * freeing it when removing blocks.  Eventually, the
			 * right edge of the truncated/punched region will
			 * be just to the left.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex);
				partial->pclu = EXT4_B2C(sbi, pblk);
				partial->state = nofree;
			}
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			EXT4_ERROR_INODE(inode,
					 "can not handle truncate %u:%u "
					 "on extent %u:%u",
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
			err = -EFSCORRUPTED;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
		/*
		 * We may end up freeing some index blocks and data from the
		 * punched range. Note that partial clusters are accounted for
		 * by ext4_free_data_revoke_credits().
		 */
		revoke_credits =
			ext4_free_metadata_revoke_credits(inode->i_sb,
							  ext_depth(inode)) +
			ext4_free_data_revoke_credits(inode, b - a + 1);

		err = ext4_datasem_ensure_credits(handle, inode, credits,
						  credits, revoke_credits);
		if (err) {
			if (err > 0)
				err = -EAGAIN;
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark unwritten if all the blocks in the
		 * extent have been removed.
		 */
		if (unwritten && num)
			ext4_ext_mark_unwritten(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we dont have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
			  ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * If there's a partial cluster and at least one extent remains in
	 * the leaf, free the partial cluster if it isn't shared with the
	 * current extent.  If it is shared with the current extent
	 * we reset the partial cluster because we've reached the start of the
	 * truncated/punched region and we're done removing blocks.
	 */
	if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
		if (partial->pclu != EXT4_B2C(sbi, pblk)) {
			int flags = get_default_free_blocks_flags(inode);

			if (ext4_is_pending(inode, partial->lblk))
				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
			ext4_free_blocks(handle, inode, NULL,
					 EXT4_C2B(sbi, partial->pclu),
					 sbi->s_cluster_ratio, flags);
			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
				ext4_rereserve_cluster(inode, partial->lblk);
		}
		partial->state = initial;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path, depth);

out:
	return err;
}

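/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */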
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
			  ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int depth = ext_depth(inode);
	struct ext4_ext_path *path = NULL;
	struct partial_cluster partial;
	handle_t *handle;
	int i = 0, err = 0;

	partial.pclu = 0;
	partial.lblk = 0;
	partial.state = initial;

	ext_debug(inode, "truncate since %u to %u\n", start, end);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
			depth + 1,
			ext4_free_metadata_revoke_credits(inode->i_sb, depth));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	trace_ext4_ext_remove_space(inode, start, end, depth);

	/*
	 * Check if we are removing extents inside the extent tree. If that
	 * is the case, we are going to punch a hole inside the extent tree
	 * so we have to check whether we need to split the extent covering
	 * the last block to remove so we can easily remove the part of it
	 * in ext4_ext_rm_leaf().
	 */
	if (end < EXT_MAX_BLOCKS - 1) {
		struct ext4_extent *ex;
		ext4_lblk_t ee_block, ex_end, lblk;
		ext4_fsblk_t pblk;

		/* find extent for or closest extent to this block */
		path = ext4_find_extent(inode, end, NULL,
					EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
		if (IS_ERR(path)) {
			ext4_journal_stop(handle);
			return PTR_ERR(path);
		}
		depth = ext_depth(inode);
		/* a leaf can be empty only if the inode has no blocks at all */
		ex = path[depth].p_ext;
		if (!ex) {
			if (depth) {
				EXT4_ERROR_INODE(inode,
						 "path[%d].p_hdr == NULL",
						 depth);
				err = -EFSCORRUPTED;
			}
			goto out;
		}

		ee_block = le32_to_cpu(ex->ee_block);
		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;

		/*
		 * See if the last block is inside the extent, if so split
		 * the extent at 'end' block so we can easily remove the
		 * tail of the first part of the split extent in
		 * ext4_ext_rm_leaf().
		 */
		if (end >= ee_block && end < ex_end) {

			/*
			 * If we're going to split the extent, note that
			 * the cluster containing the block after 'end' is
			 * in use to avoid freeing it when removing blocks.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
				partial.pclu = EXT4_B2C(sbi, pblk);
				partial.state = nofree;
			}

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use
			 * reserved block if that happens.
			 */
			err = ext4_force_split_extent_at(handle, inode, &path,
							 end + 1, 1);
			if (err < 0)
				goto out;

		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
			   partial.state == initial) {
			/*
			 * If we're punching, there's an extent to the right.
			 * If the partial cluster hasn't been set, set it to
			 * that extent's first cluster and its state to nofree
			 * so it won't be freed should it contain blocks to be
			 * removed. If it's already set (tofree/nofree), we're
			 * retrying and keep the original partial cluster info
			 * so a cluster marked tofree on the first pass isn't
			 * freed on a subsequent pass.
			 */
			lblk = ex_end + 1;
			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
						    NULL);
			if (err < 0)
				goto out;
			if (pblk) {
				partial.pclu = EXT4_B2C(sbi, pblk);
				partial.state = nofree;
			}
		}
	}
	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	if (path) {
		int k = i = depth;
		while (--k > 0)
			path[k].p_block =
				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
	} else {
		path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
			       GFP_NOFS | __GFP_NOFAIL);
		if (path == NULL) {
			ext4_journal_stop(handle);
			return -ENOMEM;
		}
		path[0].p_maxdepth = path[0].p_depth = depth;
		path[0].p_hdr = ext_inode_hdr(inode);
		i = 0;

		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
			err = -EFSCORRUPTED;
			goto out;
		}
	}
	err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial, start, end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug(inode, "initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
			  i, EXT_FIRST_INDEX(path[i].p_hdr),
			  path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug(inode, "move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = read_extent_tree_block(inode,
				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
				EXT4_EX_NOCACHE);
			if (IS_ERR(bh)) {
				/* should we reset i_size? */
				err = PTR_ERR(bh);
				break;
			}
			/* Yield here to deal with large extent trees.
			 * Should be a no-op if we did IO above. */
			cond_resched();
			if (WARN_ON(i + 1 > depth)) {
				err = -EFSCORRUPTED;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must already be prepared by the
				 * truncate of the leaf above */
				err = ext4_ext_rm_idx(handle, inode, path, i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug(inode, "return to level %d\n", i);
		}
	}

	trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
					 path->p_hdr->eh_entries);

	/*
	 * if there's a partial cluster and we have removed the first extent
	 * in the file, then we also free the partial cluster, if any
	 */
	if (partial.state == tofree && err == 0) {
		int flags = get_default_free_blocks_flags(inode);

		if (ext4_is_pending(inode, partial.lblk))
			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, partial.pclu),
				 sbi->s_cluster_ratio, flags);
		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
			ext4_rereserve_cluster(inode, partial.lblk);
		partial.state = initial;
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	path = NULL;
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}

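/*
 * called at mount time
 */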
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */
	if (ext4_has_feature_extents(sb)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
		       ", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
		       ", check binsearch"
#endif
#ifdef EXTENTS_STATS
		       ", stats"
#endif
		       "\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

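/*
 * called at umount time
 */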
void ext4_ext_release(struct super_block *sb)
{
	if (!ext4_has_feature_extents(sb))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
		       sbi->s_ext_blocks, sbi->s_ext_extents,
		       sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
		       sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

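/* FIXME!! we need to try to merge to left or right after zero-out  */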
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);
	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
				  ee_len);
}

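/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @ppath: the path to the extent
 * @split: the logical block where the extent is split
 * @split_flag: indicates if the extent could be zeroed out if the split
 *		fails, and the states (initialized or unwritten) of the two
 *		resulting extents
 * @flags: flags used to insert the new extent into the extent tree
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by @split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and the extent is just marked.
 *
 * Returns 0 on success.
 */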
static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug(inode, "logical block %llu\n", (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_unwritten(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNWRIT1 |
			     EXT4_EXT_MARK_UNWRIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: the split point is at the start of the extent,
		 * so no split is actually needed - just update the state
		 * of the extent in place.
		 */
		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
			ext4_ext_mark_unwritten(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(handle, inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
		ext4_ext_mark_unwritten(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
		ext4_ext_mark_unwritten(ex2);

	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
	if (err != -ENOSPC && err != -EDQUOT)
		goto out;

	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
			if (split_flag & EXT4_EXT_DATA_VALID1) {
				err = ext4_ext_zeroout(inode, ex2);
				zero_ex.ee_block = ex2->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex2));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex2));
			} else {
				err = ext4_ext_zeroout(inode, ex);
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
			}
		} else {
			err = ext4_ext_zeroout(inode, &orig_ex);
			zero_ex.ee_block = orig_ex.ee_block;
			zero_ex.ee_len = cpu_to_le16(
					ext4_ext_get_actual_len(&orig_ex));
			ext4_ext_store_pblock(&zero_ex,
					      ext4_ext_pblock(&orig_ex));
		}

		if (!err) {
			/* update the extent length and mark as initialized */
			ex->ee_len = cpu_to_le16(ee_len);
			ext4_ext_try_to_merge(handle, inode, path, ex);
			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
			if (!err)
				/* update extent status tree */
				err = ext4_zeroout_es(inode, &zero_ex);
			/* If we failed at this point, we don't know in which
			 * state the extent tree exactly is so don't try to fix
			 * length of the original extent as it may do even more
			 * damage.
			 */
			goto out;
		}
	}

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	/*
	 * Ignore ext4_ext_dirty return value since we are already in error
	 * path and err is a non-zero error code.
	 */
	ext4_ext_dirty(handle, inode, path + path->p_depth);
	return err;
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

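/*
 * ext4_split_extent() splits an extent and marks the extent which is
 * covered by @map as indicated by @split_flag.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *   a> no split is required
 *   b> split into two extents: the split point is at either end of the
 *      extent
 *   c> split into three extents: the split point is in the middle of the
 *      extent
 */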
static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int unwritten;
	int split_flag1, flags1;
	int allocated = map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	unwritten = ext4_ext_is_unwritten(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (unwritten)
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
				       EXT4_EXT_MARK_UNWRIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Update path is required because previous ext4_split_extent_at() may
	 * result in split of original leaf or extent zeroout.
	 */
	path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (!ex) {
		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
				 (unsigned long) map->m_lblk);
		return -EFSCORRUPTED;
	}
	unwritten = ext4_ext_is_unwritten(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (unwritten) {
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNWRIT2);
		}
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}

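/*
 * This function is called by ext4_ext_map_blocks() if someone tries to
 * write to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the
 *      extent
 *   c> Splits in three extents: Somebody is writing in the middle of the
 *      extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - The returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */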
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path **ppath,
					   int flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_sb_info *sbi;
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex1, zero_ex2;
	struct ext4_extent *ex, *abut_ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth, map_len = map->m_len;
	int allocated = 0, max_zeroout = 0;
	int err = 0;
	int split_flag = EXT4_EXT_DATA_VALID2;

	ext_debug(inode, "logical block %llu, max_blocks %u\n",
		  (unsigned long long)map->m_lblk, map_len);

	sbi = EXT4_SB(inode->i_sb);
	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
			>> inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map_len)
		eof_block = map->m_lblk + map_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	zero_ex1.ee_len = 0;
	zero_ex2.ee_len = 0;

	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);

	/* Pre-conditions */
	BUG_ON(!ext4_ext_is_unwritten(ex));
	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));

	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * unwritten extent to its neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. Transferring to the left is the common case in
	 * steady state for workloads doing sequential writes to the region
	 * allocated via unwritten extents.
	 */
	if ((map->m_lblk == ee_block) &&
		/* See if we can merge left */
		(map_len < ee_len) &&
		(ex > EXT_FIRST_EXTENT(eh))) {
		ext4_lblk_t prev_lblk;
		ext4_fsblk_t prev_pblk, ee_pblk;
		unsigned int prev_len;

		abut_ex = ex - 1;
		prev_lblk = le32_to_cpu(abut_ex->ee_block);
		prev_len = ext4_ext_get_actual_len(abut_ex);
		prev_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&
		    ((prev_lblk + prev_len) == ee_block) &&
		    ((prev_pblk + prev_len) == ee_pblk) &&
		    (prev_len < (EXT_INIT_MAX_LEN - map_len))) {
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of ex by 'map_len' blocks */
			ex->ee_block = cpu_to_le32(ee_block + map_len);
			ext4_ext_store_pblock(ex, ee_pblk + map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex);

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
		   (map_len < ee_len) &&
		   ex < EXT_LAST_EXTENT(eh)) {
		/* See if we can merge right */
		ext4_lblk_t next_lblk;
		ext4_fsblk_t next_pblk, ee_pblk;
		unsigned int next_len;

		abut_ex = ex + 1;
		next_lblk = le32_to_cpu(abut_ex->ee_block);
		next_len = ext4_ext_get_actual_len(abut_ex);
		next_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&
		    ((map->m_lblk + map_len) == next_lblk) &&
		    ((ee_pblk + ee_len) == next_pblk) &&
		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of abut_ex by 'map_len' blocks */
			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex);

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(next_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	}
	if (allocated) {
		/* Mark the block containing both extents as dirty */
		err = ext4_ext_dirty(handle, inode, path + depth);

		/* Update path to point to the right extent */
		path[depth].p_ext = abut_ex;
		goto out;
	} else
		allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
		max_zeroout = sbi->s_extent_max_zeroout_kb >>
			(inode->i_sb->s_blocksize_bits - 10);

	/*
	 * five cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the head of the
	 *    first extent.
	 * 3. split the extent into two extents, zeroout the tail of the
	 *    second extent.
	 * 4. split the extent into two extents without zeroout.
	 * 5. no splitting needed, just possibly zeroout the head and / or
	 *    the tail of the extent.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > split_map.m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 or 5 */
			zero_ex1.ee_block =
				 cpu_to_le32(split_map.m_lblk +
					     split_map.m_len);
			zero_ex1.ee_len =
				cpu_to_le16(allocated - split_map.m_len);
			ext4_ext_store_pblock(&zero_ex1,
				ext4_ext_pblock(ex) + split_map.m_lblk +
				split_map.m_len - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex1);
			if (err)
				goto out;
			split_map.m_len = allocated;
		}
		if (split_map.m_lblk - ee_block + split_map.m_len <
								max_zeroout) {
			/* case 2 or 5 */
			if (split_map.m_lblk != ee_block) {
				zero_ex2.ee_block = ex->ee_block;
				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
							      ee_block);
				ext4_ext_store_pblock(&zero_ex2,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex2);
				if (err)
					goto out;
			}

			split_map.m_len += split_map.m_lblk - ee_block;
			split_map.m_lblk = ee_block;
			allocated = map->m_len;
		}
	}

	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err) {
		err = ext4_zeroout_es(inode, &zero_ex1);
		if (!err)
			err = ext4_zeroout_es(inode, &zero_ex2);
	}
	return err ? err : allocated;
}

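/*
 * This function is called by ext4_ext_map_blocks() when DIO writes to an
 * unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the
 *      extent
 *   c> Splits in three extents: Somebody is writing in the middle of the
 *      extent
 *
 * The unwritten extent is split before the IO is submitted so that ENOSPC
 * cannot occur at IO completion time; after the IO completes, the written
 * part is converted to initialized by the end_io callback via
 * ext4_convert_unwritten_extents().
 *
 * Returns the size of the unwritten extent to be written on success.
 */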
static int ext4_split_convert_extents(handle_t *handle,
				      struct inode *inode,
				      struct ext4_map_blocks *map,
				      struct ext4_ext_path **ppath,
				      int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug(inode, "logical block %llu, max_blocks %u\n",
		  (unsigned long long)map->m_lblk, map->m_len);

	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
			>> inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	/* Convert to unwritten */
	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
		split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
		split_flag |= ee_block + ee_len <= eof_block ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
	}
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}

static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_map_blocks *map,
						struct ext4_ext_path **ppath)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug(inode, "logical block %llu, max_blocks %u\n",
		  (unsigned long long)ee_block, ee_len);

	/* If extent is larger than requested it is a clear sign that we still
	 * have some extent state machine issues left. So extent_split is still
	 * required.
	 * TODO: Once all related issues will be fixed this situation should be
	 * rare.
	 */
	if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef CONFIG_EXT4_DEBUG
		ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
			     " len %u; IO logical block %llu, len %u",
			     inode->i_ino, (unsigned long long)ee_block, ee_len,
			     (unsigned long long)map->m_lblk, map->m_len);
#endif
		err = ext4_split_convert_extents(handle, inode, map, ppath,
						 EXT4_GET_BLOCKS_CONVERT);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
			   struct ext4_map_blocks *map,
			   struct ext4_ext_path **ppath,
			   unsigned int *allocated)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	/*
	 * Make sure that the extent is no bigger than we support with
	 * unwritten extent
	 */
	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug(inode, "logical block %llu, max_blocks %u\n",
		  (unsigned long long)ee_block, ee_len);

	if (ee_block != map->m_lblk || ee_len > map->m_len) {
		err = ext4_split_convert_extents(handle, inode, map, ppath,
				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
		if (!ex) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) map->m_lblk);
			return -EFSCORRUPTED;
		}
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		return err;
	/* first mark the extent as unwritten */
	ext4_ext_mark_unwritten(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
	if (err)
		return err;
	ext4_ext_show_leaf(inode, path);

	ext4_update_inode_fsync_trans(handle, inode, 1);

	map->m_flags |= EXT4_MAP_UNWRITTEN;
	if (*allocated > map->m_len)
		*allocated = map->m_len;
	map->m_len = *allocated;
	return 0;
}

static int
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path **ppath, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	struct ext4_ext_path __maybe_unused *path = *ppath;
	int ret = 0;
	int err = 0;

	ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
		  (unsigned long long)map->m_lblk, map->m_len, flags,
		  allocated);
	ext4_ext_show_leaf(inode, path);

	/*
	 * When writing into unwritten space, we should not fail to
	 * allocate metadata blocks for the new extent block if needed.
	 */
	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;

	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
						allocated, newblock);

	/* get_block() before submitting IO, split the extent */
	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
		ret = ext4_split_convert_extents(handle, inode, map, ppath,
					flags | EXT4_GET_BLOCKS_CONVERT);
		if (ret < 0) {
			err = ret;
			goto out2;
		}
		/*
		 * shouldn't get a 0 return when splitting an extent unless
		 * m_len is 0 (bug) or extent has been corrupted
		 */
		if (unlikely(ret == 0)) {
			EXT4_ERROR_INODE(inode,
					 "unexpected ret == 0, m_len = %u",
					 map->m_len);
			err = -EFSCORRUPTED;
			goto out2;
		}
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out;
	}

	/* IO end_io complete, convert the filled extent to written */
	if (flags & EXT4_GET_BLOCKS_CONVERT) {
		err = ext4_convert_unwritten_extents_endio(handle, inode, map,
							   ppath);
		if (err < 0)
			goto out2;
		ext4_update_inode_fsync_trans(handle, inode, 1);
		goto map_out;
	}
	/* buffered IO cases */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto map_out;
	}

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/*
	 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
	 * For buffered writes, at writepage time, etc.  Convert a
	 * discovered unwritten extent to written.
	 */
	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
	if (ret < 0) {
		err = ret;
		goto out2;
	}
	ext4_update_inode_fsync_trans(handle, inode, 1);
	/*
	 * shouldn't get a 0 return when converting an unwritten extent
	 * unless m_len is 0 (bug) or extent has been corrupted
	 */
	if (unlikely(ret == 0)) {
		EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
				 map->m_len);
		err = -EFSCORRUPTED;
		goto out2;
	}

out:
	allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
out1:
	map->m_pblk = newblock;
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_len = allocated;
	ext4_ext_show_leaf(inode, path);
out2:
	return err ? err : allocated;
}

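/*
 * When doing a bigalloc allocation, determine whether the requested block
 * falls into a cluster already covered (even partially) by the extent
 * found by the lookup.  If so, map the request onto that cluster directly
 * instead of allocating a new one, and trim map->m_len so it does not
 * spill past any already-mapped neighboring blocks.
 *
 * Returns 1 if an implied cluster allocation was performed (map->m_pblk
 * and map->m_len are filled in), 0 otherwise.
 */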
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *                     |------- extent ----|
		 *         |--- requested region ---|
		 *         |===========|
		 */
		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the
		 * cluster:
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}

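/*
 * Block allocation/map/preallocation routine for extents based files
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *	if create == 0 and these are pre-allocated blocks
 *		buffer head is unmapped
 *	otherwise blocks are mapped
 *
 * return = 0, if plain lookup failed (blocks have not been allocated)
 *	buffer head is unmapped
 *
 * return < 0, error case.
 */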
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex, ex2;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_fsblk_t newblock = 0, pblk;
	int err = 0, depth, ret;
	unsigned int allocated = 0, offset = 0;
	unsigned int allocated_clusters = 0;
	struct ext4_allocation_request ar;
	ext4_lblk_t cluster_offset;

	ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

	/* find extent for this block */
	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * unwritten extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);

		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);

		/* if found extent covers block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug(inode, "%u fit into %u:%d -> %llu\n",
				  map->m_lblk, ee_block, ee_len, newblock);

			/*
			 * If the extent is initialized check whether the
			 * caller wants to convert it to unwritten.
			 */
			if ((!ext4_ext_is_unwritten(ex)) &&
			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
				err = convert_initialized_extent(handle,
					inode, map, &path, &allocated);
				goto out;
			} else if (!ext4_ext_is_unwritten(ex)) {
				map->m_flags |= EXT4_MAP_MAPPED;
				map->m_pblk = newblock;
				if (allocated > map->m_len)
					allocated = map->m_len;
				map->m_len = allocated;
				ext4_ext_show_leaf(inode, path);
				goto out;
			}

			ret = ext4_ext_handle_unwritten_extents(
				handle, inode, map, &path, flags,
				allocated, newblock);
			if (ret < 0)
				err = ret;
			else
				allocated = ret;
			goto out;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		ext4_lblk_t hole_start, hole_len;

		hole_start = map->m_lblk;
		hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);

		/* Update hole_len to reflect hole size after map->m_lblk */
		if (hole_start != map->m_lblk)
			hole_len -= map->m_lblk - hole_start;
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, hole_len);

		goto out;
	}

	/*
	 * Okay, we need to do block allocation.
	 */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);

	/*
	 * If we are doing bigalloc, check to see if the extent returned
	 * by ext4_find_extent() implies a cluster we can use.
	 */
	if (cluster_offset && ex &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		goto got_allocated_blocks;
	}

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out;
	ar.lright = map->m_lblk;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
	if (err < 0)
		goto out;

	/* Check if the extent after searching to the right implies a
	 * cluster we can use. */
	if ((sbi->s_cluster_ratio > 1) && err &&
	    get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		goto got_allocated_blocks;
	}

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
	 * EXT_UNWRITTEN_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_UNWRITTEN_MAX_LEN;

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	/*
	 * We calculate the offset from the beginning of the cluster
	 * for the logical block number, since when we allocate a
	 * physical cluster, the physical block should start at the
	 * same offset from the beginning of the cluster.  This is
	 * needed so that future calls to get_implied_cluster_alloc()
	 * work correctly.
	 */
	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
	ar.goal -= offset;
	ar.logical -= offset;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out;
	allocated_clusters = ar.len;
	ar.len = EXT4_C2B(sbi, ar.len) - offset;
	ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
		  ar.goal, newblock, ar.len, allocated);
	if (ar.len > allocated)
		ar.len = allocated;

got_allocated_blocks:
	/* try to insert new extent into found leaf and return */
	pblk = newblock + offset;
	ext4_ext_store_pblock(&newex, pblk);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark unwritten */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		ext4_ext_mark_unwritten(&newex);
		map->m_flags |= EXT4_MAP_UNWRITTEN;
	}

	err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
	if (err) {
		if (allocated_clusters) {
			int fb_flags = 0;

			/*
			 * free data blocks we just allocated.
			 * not a good idea to call discard here directly,
			 * but otherwise we'd need to call it every free().
			 */
			ext4_discard_preallocations(inode, 0);
			if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
				fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
			ext4_free_blocks(handle, inode, NULL, newblock,
					 EXT4_C2B(sbi, allocated_clusters),
					 fb_flags);
		}
		goto out;
	}

	/*
	 * Reduce the reserved cluster count to reflect successful deferred
	 * allocation of delayed allocated clusters or direct allocation of
	 * clusters discovered to be delayed allocated.  Once allocated, a
	 * cluster is not included in the reserved count.
	 */
	if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/*
			 * When allocating delayed allocated clusters, simply
			 * reduce the reserved cluster count and claim quota
			 */
			ext4_da_update_reserve_space(inode, allocated_clusters,
						     1);
		} else {
			ext4_lblk_t lblk, len;
			unsigned int n;

			/*
			 * When allocating non-delayed allocated clusters
			 * (from fallocate, filemap, DIO, or clusters
			 * allocated when delalloc has been disabled by
			 * ext4_nonda_switch), reduce the reserved cluster
			 * count by the number of allocated clusters that
			 * have previously been delayed allocated.  Quota
			 * has been claimed by ext4_mb_new_blocks() above,
			 * so release the quota reservations made for any
			 * previously delayed allocated clusters.
			 */
			lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
			len = allocated_clusters << sbi->s_cluster_bits;
			n = ext4_es_delayed_clu(inode, lblk, len);
			if (n > 0)
				ext4_da_update_reserve_space(inode, (int) n, 0);
		}
	}

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an unwritten extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
	else
		ext4_update_inode_fsync_trans(handle, inode, 0);

	map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
	map->m_pblk = pblk;
	map->m_len = ar.len;
	allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
out:
	ext4_ext_drop_refs(path);
	kfree(path);

	trace_ext4_ext_map_blocks_exit(inode, flags, map,
				       err ? err : allocated);
	return err ? err : allocated;
}

int ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	int err = 0;

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err)
		return err;

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
retry:
	err = ext4_es_remove_extent(inode, last_block,
				    EXT_MAX_BLOCKS - last_block);
	if (err == -ENOMEM) {
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry;
	}
	if (err)
		return err;
retry_remove_space:
	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
	if (err == -ENOMEM) {
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry_remove_space;
	}
	return err;
}

static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
				  ext4_lblk_t len, loff_t new_size,
				  int flags)
{
	struct inode *inode = file_inode(file);
	handle_t *handle;
	int ret = 0, ret2 = 0, ret3 = 0;
	int retries = 0;
	int depth = 0;
	struct ext4_map_blocks map;
	unsigned int credits;
	loff_t epos;

	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
	map.m_lblk = offset;
	map.m_len = len;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNWRITTEN_MAX_LEN)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;

	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, len);
	depth = ext_depth(inode);

retry:
	while (len) {
		/*
		 * Recalculate credits when extent tree depth changes.
		 */
		if (depth != ext_depth(inode)) {
			credits = ext4_chunk_trans_blocks(inode, len);
			depth = ext_depth(inode);
		}

		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
			ext4_debug("inode #%lu: block %u: len %u: "
				   "ext4_ext_map_blocks returned %d",
				   inode->i_ino, map.m_lblk,
				   map.m_len, ret);
			ext4_mark_inode_dirty(handle, inode);
			ext4_journal_stop(handle);
			break;
		}
		/*
		 * allow a full retry cycle for any remaining allocations
		 */
		retries = 0;
		map.m_lblk += ret;
		map.m_len = len = len - ret;
		epos = (loff_t)map.m_lblk << inode->i_blkbits;
		inode->i_ctime = current_time(inode);
		if (new_size) {
			if (epos > new_size)
				epos = new_size;
			if (ext4_update_inode_size(inode, epos) & 0x1)
				inode->i_mtime = inode->i_ctime;
		}
		ret2 = ext4_mark_inode_dirty(handle, inode);
		ext4_update_inode_fsync_trans(handle, inode, 1);
		ret3 = ext4_journal_stop(handle);
		ret2 = ret3 ? ret3 : ret2;
		if (unlikely(ret2))
			break;
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	return ret > 0 ? ret2 : ret;
}

static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);

static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);

static long ext4_zero_range(struct file *file, loff_t offset,
			    loff_t len, int mode)
{
	struct inode *inode = file_inode(file);
	handle_t *handle = NULL;
	unsigned int max_blocks;
	loff_t new_size = 0;
	int ret = 0;
	int flags;
	int credits;
	int partial_begin, partial_end;
	loff_t start, end;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	trace_ext4_zero_range(inode, offset, len, mode);

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	/*
	 * Round up offset. This is not fallocate, we need to zero out
	 * blocks, so convert interior block aligned part of the range to
	 * unwritten and possibly manually zero out unaligned parts of the
	 * range.
	 */
	start = round_up(offset, 1 << blkbits);
	end = round_down((offset + len), 1 << blkbits);

	if (start < offset || end > offset + len)
		return -EINVAL;
	partial_begin = offset & ((1 << blkbits) - 1);
	partial_end = (offset + len) & ((1 << blkbits) - 1);

	lblk = start >> blkbits;
	max_blocks = (end >> blkbits);
	if (max_blocks < lblk)
		max_blocks = 0;
	else
		max_blocks -= lblk;

	inode_lock(inode);

	/*
	 * Indirect files do not support unwritten extents
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > inode->i_size ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out_mutex;
	}

	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/* Preallocate the range including the unaligned edges */
	if (partial_begin || partial_end) {
		ret = ext4_alloc_file_blocks(file,
				round_down(offset, 1 << blkbits) >> blkbits,
				(round_up((offset + len), 1 << blkbits) -
				 round_down(offset, 1 << blkbits)) >> blkbits,
				new_size, flags);
		if (ret)
			goto out_mutex;

	}

	/* Zero range excluding the unaligned edges */
	if (max_blocks > 0) {
		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
			  EXT4_EX_NOCACHE);

		/*
		 * Prevent page faults from reinstantiating pages we have
		 * released from page cache.
		 */
		down_write(&EXT4_I(inode)->i_mmap_sem);

		ret = ext4_break_layouts(inode);
		if (ret) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto out_mutex;
		}

		ret = ext4_update_disksize_before_punch(inode, offset, len);
		if (ret) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto out_mutex;
		}
		/* Now release the pages and zero block aligned part of pages */
		truncate_pagecache_range(inode, start, end - 1);
		inode->i_mtime = inode->i_ctime = current_time(inode);

		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
					     flags);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			goto out_mutex;
	}
	if (!partial_begin && !partial_end)
		goto out_mutex;

	/*
	 * In worst case we have to writeout two nonadjacent unwritten
	 * blocks and update the inode
	 */
	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
	if (ext4_should_journal_data(inode))
		credits += 2;
	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(inode->i_sb, ret);
		goto out_mutex;
	}

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (new_size)
		ext4_update_inode_size(inode, new_size);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret))
		goto out_handle;
	ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
			(offset + len - 1) >> inode->i_sb->s_blocksize_bits);
	/* Zero out partial block at the edges of the range */
	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

	if (file->f_flags & O_SYNC)
		ext4_handle_sync(handle);

out_handle:
	ext4_journal_stop(handle);
out_mutex:
	inode_unlock(inode);
	return ret;
}

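/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */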
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	loff_t new_size = 0;
	unsigned int max_blocks;
	int ret = 0;
	int flags;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	/*
	 * Encrypted inodes can't handle collapse range or insert
	 * range since we would need to re-encrypt blocks with a
	 * different IV or XTS tweak (which are based on the logical
	 * block number).
	 */
	if (IS_ENCRYPTED(inode) &&
	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
		     FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	ext4_fc_start_update(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = ext4_punch_hole(inode, offset, len);
		goto exit;
	}

	ret = ext4_convert_inline_data(inode);
	if (ret)
		goto exit;

	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = ext4_collapse_range(inode, offset, len);
		goto exit;
	}

	if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = ext4_insert_range(inode, offset, len);
		goto exit;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = ext4_zero_range(file, offset, len, mode);
		goto exit;
	}
	trace_ext4_fallocate_enter(inode, offset, len, mode);
	lblk = offset >> blkbits;

	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;

	inode_lock(inode);

	/*
	 * We only support preallocation for extent-based files
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len > inode->i_size ||
	     offset + len > EXT4_I(inode)->i_disksize)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out;
	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
	if (ret)
		goto out;

	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
					EXT4_I(inode)->i_sync_tid);
	}
out:
	inode_unlock(inode);
	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
exit:
	ext4_fc_stop_update(inode);
	return ret;
}

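/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size.
 * All unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io callback function
 * for unwritten extents that have already been written.
 */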
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
				   loff_t offset, ssize_t len)
{
	unsigned int max_blocks;
	int ret = 0, ret2 = 0, ret3 = 0;
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	unsigned int credits = 0;

	map.m_lblk = offset >> blkbits;
	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);

	if (!handle) {
		/*
		 * credits to insert 1 extent into extent tree
		 */
		credits = ext4_chunk_trans_blocks(inode, max_blocks);
	}
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		if (credits) {
			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
						    credits);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				break;
			}
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (credits) {
			ret3 = ext4_journal_stop(handle);
			if (unlikely(ret3))
				ret2 = ret3;
		}

		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}

int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
{
	int ret = 0, err = 0;
	struct ext4_io_end_vec *io_end_vec;

	/*
	 * This is somewhat ugly but the idea is clear: When transaction is
	 * reserved, everything goes into it. Otherwise we rather start several
	 * smaller transactions for conversion of each extent separately.
	 */
	if (handle) {
		handle = ext4_journal_start_reserved(handle,
						     EXT4_HT_EXT_CONVERT);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
	}

	list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
		ret = ext4_convert_unwritten_extents(handle, io_end->inode,
						     io_end_vec->offset,
						     io_end_vec->size);
		if (ret)
			break;
	}

	if (handle)
		err = ext4_journal_stop(handle);

	return ret < 0 ? ret : err;
}

4815static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4816{
4817 __u64 physical = 0;
4818 __u64 length = 0;
4819 int blockbits = inode->i_sb->s_blocksize_bits;
4820 int error = 0;
4821 u16 iomap_type;
4822
4823
4824 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4825 struct ext4_iloc iloc;
4826 int offset;
4827
4828 error = ext4_get_inode_loc(inode, &iloc);
4829 if (error)
4830 return error;
4831 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4832 offset = EXT4_GOOD_OLD_INODE_SIZE +
4833 EXT4_I(inode)->i_extra_isize;
4834 physical += offset;
4835 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4836 brelse(iloc.bh);
4837 iomap_type = IOMAP_INLINE;
4838 } else if (EXT4_I(inode)->i_file_acl) {
4839 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4840 length = inode->i_sb->s_blocksize;
4841 iomap_type = IOMAP_MAPPED;
4842 } else {
4843
4844 error = -ENOENT;
4845 goto out;
4846 }
4847
4848 iomap->addr = physical;
4849 iomap->offset = 0;
4850 iomap->length = length;
4851 iomap->type = iomap_type;
4852 iomap->flags = 0;
4853out:
4854 return error;
4855}

static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
				  loff_t length, unsigned flags,
				  struct iomap *iomap, struct iomap *srcmap)
{
	int error;

	error = ext4_iomap_xattr_fiemap(inode, iomap);
	if (error == 0 && (offset >= iomap->length))
		error = -ENOENT;
	return error;
}

static const struct iomap_ops ext4_iomap_xattr_ops = {
	.iomap_begin		= ext4_iomap_xattr_begin,
};

static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
{
	u64 maxbytes;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		maxbytes = inode->i_sb->s_maxbytes;
	else
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;

	if (*len == 0)
		return -EINVAL;
	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (*len > maxbytes || (maxbytes - *len) < start)
		*len = maxbytes - start;
	return 0;
}
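
/*
 * Worked example (illustrative, with tiny numbers for readability): if
 * maxbytes == 100, start == 40 and *len == 80, then maxbytes - *len == 20
 * falls below start, so *len is clamped to maxbytes - start == 60.
 */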

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	int error = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
	}

	/*
	 * For bitmap files the maximum size limit could be smaller than
	 * s_maxbytes, so check len here manually instead of just relying on
	 * the generic check.
	 */
	error = ext4_fiemap_check_ranges(inode, start, &len);
	if (error)
		return error;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
		return iomap_fiemap(inode, fieinfo, start, len,
				    &ext4_iomap_xattr_ops);
	}

	return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
}
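
/*
 * Illustrative userspace counterpart (not part of the original source):
 * ext4_fiemap() is reached through the FIEMAP ioctl.  A minimal sketch,
 * assuming <linux/fs.h> and <linux/fiemap.h>:
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;	// flush dirty data first
 *	fm->fm_extent_count = 32;
 *	if (ioctl(fd, FS_IOC_FIEMAP, fm))
 *		err(1, "FIEMAP");
 */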

int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
		      __u64 start, __u64 len)
{
	ext4_lblk_t start_blk, len_blks;
	__u64 last_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline;

		down_read(&EXT4_I(inode)->xattr_sem);
		has_inline = ext4_has_inline_data(inode);
		up_read(&EXT4_I(inode)->xattr_sem);
		if (has_inline)
			return 0;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
	}

	error = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (error)
		return error;

	error = ext4_fiemap_check_ranges(inode, start, &len);
	if (error)
		return error;

	start_blk = start >> inode->i_sb->s_blocksize_bits;
	last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
	if (last_blk >= EXT_MAX_BLOCKS)
		last_blk = EXT_MAX_BLOCKS-1;
	len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

	/*
	 * Walk the extent tree gathering extent information
	 * and pushing extents back to the user.
	 */
	return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
}

/*
 * ext4_access_path:
 * Function to access the path buffer for marking it dirty.
 * It also checks if there are sufficient credits left in the journal handle
 * to update path.
 */
static int
ext4_access_path(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path)
{
	int credits, err;

	if (!ext4_handle_valid(handle))
		return 0;

	/*
	 * Check if need to extend journal credits:
	 * 3 for leaf, sb, and inode plus 2 (bmap and group
	 * descriptor) for each block group; assume two block
	 * groups
	 */
	credits = ext4_writepage_trans_blocks(inode);
	err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
	if (err < 0)
		return err;

	err = ext4_ext_get_access(handle, inode, path);
	return err;
}

/*
 * ext4_ext_shift_path_extents:
 * Shift the extents of a path structure lying between path[depth].p_ext
 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks.  @SHIFT tells
 * whether it is a right shift or a left shift operation.
 */
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
			    struct inode *inode, handle_t *handle,
			    enum SHIFT_DIRECTION SHIFT)
{
	int depth, err = 0;
	struct ext4_extent *ex_start, *ex_last;
	bool update = false;

	depth = path->p_depth;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			ex_start = path[depth].p_ext;
			if (!ex_start)
				return -EFSCORRUPTED;

			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);

			err = ext4_access_path(handle, inode, path + depth);
			if (err)
				goto out;

			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
				update = true;

			while (ex_start <= ex_last) {
				if (SHIFT == SHIFT_LEFT) {
					le32_add_cpu(&ex_start->ee_block,
						-shift);
					/* Try to merge to the left. */
					if ((ex_start >
					    EXT_FIRST_EXTENT(path[depth].p_hdr))
					    &&
					    ext4_ext_try_to_merge_right(inode,
					    path, ex_start - 1))
						ex_last--;
					else
						ex_start++;
				} else {
					le32_add_cpu(&ex_last->ee_block, shift);
					ext4_ext_try_to_merge_right(inode, path,
								    ex_last);
					ex_last--;
				}
			}
			err = ext4_ext_dirty(handle, inode, path + depth);
			if (err)
				goto out;

			if (--depth < 0 || !update)
				break;
		}

		/* Update index too */
		err = ext4_access_path(handle, inode, path + depth);
		if (err)
			goto out;

		if (SHIFT == SHIFT_LEFT)
			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
		else
			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/* we are done if current index is not a starting index */
		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
			break;

		depth--;
	}

out:
	return err;
}
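
/*
 * Worked example (illustrative): shifting left by shift == 3, a leaf with
 * extents starting at logical blocks 10 and 14 is rewritten in place so
 * they start at 7 and 11; if the extent now at 7 abuts its left neighbour,
 * ext4_ext_try_to_merge_right() folds the two together and ex_last is
 * pulled back accordingly.
 */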

/*
 * ext4_ext_shift_extents:
 * All the extents which lie in the range from @start to the last allocated
 * block for the @inode are shifted either towards left or right (depending
 * upon @SHIFT) by @shift blocks.
 * On success, 0 is returned, error otherwise.
 */
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
		       ext4_lblk_t start, ext4_lblk_t shift,
		       enum SHIFT_DIRECTION SHIFT)
{
	struct ext4_ext_path *path;
	int ret = 0, depth;
	struct ext4_extent *extent;
	ext4_lblk_t stop, *iterator, ex_start, ex_end;

	/* Let path point to the last extent */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = path->p_depth;
	extent = path[depth].p_ext;
	if (!extent)
		goto out;

	stop = le32_to_cpu(extent->ee_block);

	/*
	 * For left shifts, make sure the hole on the left is big enough to
	 * accommodate the shift.  For right shifts, make sure the last extent
	 * won't be shifted beyond EXT_MAX_BLOCKS.
	 */
	if (SHIFT == SHIFT_LEFT) {
		path = ext4_find_extent(inode, start - 1, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (extent) {
			ex_start = le32_to_cpu(extent->ee_block);
			ex_end = le32_to_cpu(extent->ee_block) +
				ext4_ext_get_actual_len(extent);
		} else {
			ex_start = 0;
			ex_end = 0;
		}

		if ((start == ex_start && shift > ex_start) ||
		    (shift > start - ex_end)) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (shift > EXT_MAX_BLOCKS -
		    (stop + ext4_ext_get_actual_len(extent))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * In case of left shift, iterator points to start and it is increased
	 * till we reach stop. In case of right shift, iterator points to stop
	 * and it is decreased till we reach start.
	 */
	if (SHIFT == SHIFT_LEFT)
		iterator = &start;
	else
		iterator = &stop;

	/*
	 * It's safe to start updating extents.  Start and stop are unsigned,
	 * so in case of right shift if an extent with 0 block is reached,
	 * iterator becomes NULL to indicate the end of the loop.
	 */
	while (iterator && start <= stop) {
		path = ext4_find_extent(inode, *iterator, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (!extent) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) *iterator);
			return -EFSCORRUPTED;
		}
		if (SHIFT == SHIFT_LEFT && *iterator >
		    le32_to_cpu(extent->ee_block)) {
			/* Hole, move to the next extent */
			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
				path[depth].p_ext++;
			} else {
				*iterator = ext4_ext_next_allocated_block(path);
				continue;
			}
		}

		if (SHIFT == SHIFT_LEFT) {
			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
			*iterator = le32_to_cpu(extent->ee_block) +
					ext4_ext_get_actual_len(extent);
		} else {
			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
			if (le32_to_cpu(extent->ee_block) > 0)
				*iterator = le32_to_cpu(extent->ee_block) - 1;
			else
				/* Beginning is reached, end of the loop */
				iterator = NULL;
			/* Update path extent in case we need to stop */
			while (le32_to_cpu(extent->ee_block) < start)
				extent++;
			path[depth].p_ext = extent;
		}
		ret = ext4_ext_shift_path_extents(path, shift, inode,
						  handle, SHIFT);
		if (ret)
			break;
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

/*
 * ext4_collapse_range:
 * This implements the fallocate's collapse range functionality for ext4.
 * Returns 0 on success, non-zero on error.
 */
static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t punch_start, punch_stop;
	handle_t *handle;
	unsigned int credits;
	loff_t new_size, ioffset;
	int ret;

	/*
	 * We need to test this early because xfstests assumes that a
	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support collapse range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Collapse range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;

	trace_ext4_collapse_range(inode, offset, len);

	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/*
	 * There is no need to overlap collapse range with EOF, in which case
	 * it is effectively a truncate operation.
	 */
	if (offset + len >= inode->i_size) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_mmap;

	/*
	 * Need to round down offset to be aligned with page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/*
	 * Write tail of the last page before removed range since it will get
	 * removed from the page cache below.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
	if (ret)
		goto out_mmap;
	/*
	 * Write data that will be shifted to preserve them when discarding
	 * page cache below. We are also protected from pages becoming dirty
	 * by i_mmap_sem.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode, 0);

	ret = ext4_es_remove_extent(inode, punch_start,
				    EXT_MAX_BLOCKS - punch_start);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}
	ext4_discard_preallocations(inode, 0);

	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
				     punch_stop - punch_start, SHIFT_LEFT);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	new_size = inode->i_size - len;
	i_size_write(inode, new_size);
	EXT4_I(inode)->i_disksize = new_size;

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
	ext4_fc_stop_ineligible(sb);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}
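
/*
 * Illustrative userspace counterpart (not part of the original source):
 * the collapse path is driven by fallocate(2).  With 4k clusters, the
 * following drops bytes [4096, 12288) and shifts the tail left, shrinking
 * the file by 8 KiB:
 *
 *	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192))
 *		err(1, "collapse");
 */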

/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks starting from @offset to the EOF are shifted right
 * by @len to create a hole in the @inode. Inode size is increased by @len.
 *
 * Returns 0 on success, error otherwise.
 */
static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret = 0, depth, split_flag = 0;
	loff_t ioffset;

	/*
	 * We need to test this early because xfstests assumes that an
	 * insert range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support insert range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Insert range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;

	trace_ext4_insert_range(inode, offset, len);

	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Check whether the maximum file size would be exceeded */
	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
		ret = -EFBIG;
		goto out_mutex;
	}

	/* Offset must be less than i_size */
	if (offset >= inode->i_size) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_mmap;

	/*
	 * Need to round down to align start offset to page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
			LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);

	/* Expand file to avoid data loss if there is error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode, 0);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of extent, split
		 * the extent @offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
				(offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}

		ext4_ext_drop_refs(path);
		kfree(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	ret = ext4_es_remove_extent(inode, offset_lblk,
			EXT_MAX_BLOCKS - offset_lblk);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	/*
	 * if offset_lblk lies in a hole which is at start of file, use
	 * ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
		len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
	ext4_fc_stop_ineligible(sb);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}
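
/*
 * Illustrative userspace counterpart (not part of the original source):
 * insert range is the inverse of collapse.  With 4k clusters, the
 * following shifts everything from byte 4096 onwards right by 8 KiB and
 * leaves a hole in the gap:
 *
 *	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 8192))
 *		err(1, "insert");
 */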

/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1:	First inode
 * @inode2:	Second inode
 * @lblk1:	Start block for first inode
 * @lblk2:	Start block for second inode
 * @count:	Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp:	Pointer to save error value
 *
 * This helper routine does exactly what its name promises: it swaps the
 * extents between two inodes.  All other stuff such as page-cache locking
 * consistency, bh mapping consistency or extent's data copying must be
 * performed by the caller.
 * Locking:
 *		i_mutex is held for both inodes
 *		i_data_sem is locked for write for both inodes
 * Assumptions:
 *		All pages from the requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);

		/*
		 * Looks scary, but it isn't: inode2 already points to the new
		 * blocks and was successfully dirtied, and an error here can
		 * only come from a journal failure, in which case the whole
		 * transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
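
/*
 * Note: in-tree callers of ext4_swap_extents() - e.g. the EXT4_IOC_MOVE_EXT
 * path in fs/ext4/move_extent.c and the EXT4_IOC_SWAP_BOOT boot-loader
 * inode swap - are responsible for satisfying the locking rules above.
 */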

/**
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it.  Otherwise,
 * returns 0.  Can also return negative error codes.  Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty.  This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_ext_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster.  The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_ext_drop_refs(path);
	kfree(path);

	return err ? err : mapped;
}
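
/*
 * Worked example (illustrative): with a bigalloc cluster ratio of 16
 * blocks per cluster, a query for lclu == 3 starts the lookup at logical
 * block EXT4_C2B(sbi, 3) == 48.  If the tree holds an extent covering
 * blocks 40..50, then first_lclu == EXT4_B2C(sbi, 40) == 2 and
 * last_lclu == EXT4_B2C(sbi, 50) == 3, so the function returns 1.
 */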

/*
 * Updates physical block address and unwritten status of extent
 * starting at lblk start and of len.  If such an extent doesn't exist,
 * this function splits the extent tree appropriately to create an
 * extent like this.  This function is called in the fast commit
 * replay path.  Returns 0 on success and error on failure.
 */
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path = NULL, *ppath;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
		ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		ppath = path;
		down_write(&EXT4_I(inode)->i_data_sem);
		ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (ret)
			goto out;
		kfree(path);
		path = ext4_find_extent(inode, start, NULL, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		ppath = path;
		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);
		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_force_split_extent_at(NULL, inode, &ppath,
							 start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (ret)
				goto out;
			kfree(path);
			path = ext4_find_extent(inode, start, NULL, 0);
			if (IS_ERR(path))
				return -EINVAL;
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}

/* Try to shrink the extent tree */
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_ext_drop_refs(path);
			kfree(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}

/* Check if *cur is a hole and if it is, skip it */
static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret != 0)
		return;
	*cur = *cur + map.m_len;
}

/* Count number of blocks used by this inode and update i_blocks */
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
					EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_ext_drop_refs(path);
		kfree(path);
		goto out;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_ext_drop_refs(path);
	kfree(path);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}

	/*
	 * Count extent tree blocks.  We do it by comparing the paths of two
	 * successive extent lookups: at each level, a path block that differs
	 * from the previous lookup's is counted once.
	 */
	cur = 0;
	skip_hole(inode, &cur);
	path = ext4_find_extent(inode, cur, NULL, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	ext4_ext_drop_refs(path);
	kfree(path);
	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_ext_drop_refs(path);
			kfree(path);
			return 0;
		}
		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
					ext4_ext_get_actual_len(ex));
		skip_hole(inode, &cur);

		path2 = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path2)) {
			ext4_ext_drop_refs(path);
			kfree(path);
			break;
		}
		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
		ext4_ext_drop_refs(path);
		ext4_ext_drop_refs(path2);
		kfree(path);
		kfree(path2);
	}

out:
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
	return 0;
}

/*
 * Clear the bits for the blocks used by this inode (both the data blocks
 * and the extent tree blocks) in the in-memory block bitmaps.  Used by
 * the fast commit replay path.
 */
int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
					EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_ext_drop_refs(path);
		kfree(path);
		return 0;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_ext_drop_refs(path);
	kfree(path);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
			if (!IS_ERR_OR_NULL(path)) {
				for (j = 0; j < path->p_depth; j++) {
					/* clear the extent tree blocks */
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, 0);
				}
				ext4_ext_drop_refs(path);
				kfree(path);
			}
			/* clear the data blocks for this mapping */
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
		}
		cur = cur + map.m_len;
	}

	return 0;
}