/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */

#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);
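/*
 * Test whether an inode is a fast symlink.
 */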
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}

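/*
 * Called at the last iput() if i_nlink is zero.
 */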
void ext2_evict_inode(struct inode * inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		sb_start_intwrite(inode->i_sb);
		/* set dtime */
		EXT2_I(inode)->i_dtime = get_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* truncate to 0 */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
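/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext2 uses a data structure
 *	common for UNIX filesystems - tree of pointers anchored in the inode,
 *	with data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one.  If @i_block is out of range
 *	(negative or too large) a warning is printed and zero returned.
 */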
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
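/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  For each triple, chain[i].key contains
 *	the block number (little-endian, as stored on disk), chain[i].p the
 *	address that number was read from, and chain[i].bh the buffer_head
 *	hosting it (%NULL for i == 0, where the number lives in the inode).
 *
 *	Function stops when it stumbles upon a zero pointer (absent block;
 *	*@err == 0), gets an IO error reading an indirect block
 *	(*@err == -EIO), or notices that the chain had been changed while it
 *	was being read (*@err == -EAGAIN).  If it reads all @depth-1 indirect
 *	blocks successfully and finds the whole chain, it returns %NULL with
 *	*@err == 0.
 */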
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
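/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same cylinder group.
 *
 *	In the latter case we colour the starting block by the callers PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group.  The PID is used here so that
 *	functionally related files will be close-by.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */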
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it into
	 * the same cylinder group then.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
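/**
 *	ext2_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Returns preferred place for a block (the goal).
 */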
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
{
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}
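/**
 *	ext2_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	Returns the number of direct blocks to allocate.
 */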
static int
ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
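/**
 *	ext2_alloc_blocks - allocate the blocks needed for a branch
 *	@inode: owner
 *	@goal: preferred place for allocation
 *	@indirect_blks: the number of blocks needed for indirect blocks
 *	@blks: the number of blocks needed for direct blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block
 *	@err: here we store the error value
 *
 *	Returns the number of direct blocks allocated.
 */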
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks need to allocate (required).
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
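/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: depth of the chain (number of indirect blocks)
 *	@blks: number of allocated direct blocks
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates the blocks, zeroes out all indirect blocks,
 *	links them into a chain and, for directories with DIRSYNC set, writes
 *	them to disk.  It stores the information about the chain in @branch,
 *	in the same format as ext2_get_branch() would.  Upon exit the chain
 *	is still disconnected from the tree: *branch->p is zero and
 *	branch->key contains the number that should be placed there to close
 *	the gap.  On failure every allocated block is freed again.
 */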
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_write_sync()
		 * and b_inode_buffers.  But not for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
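/**
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  Upon return the full chain to the new block is
 * in place.
 */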
static void ext2_splice_branch(struct inode *inode,
			       long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist find the proper goal block for next
	 * allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
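/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */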
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ???? Block out ext2_truncate while we alter the tree
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * We must unmap blocks before zeroing so that writeback cannot
		 * overwrite zeros with stale data from block device page cache.
		 */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * block must be initialised before we put it in the tree
		 * so that it's not found by another thread before it's
		 * initialised
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}

int ext2_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
			create);
	if (ret <= 0)
		return ret;

	map_bh(bh_result, inode->i_sb, bno);
	bh_result->b_size = (ret << inode->i_blkbits);
	if (new)
		set_buffer_new(bh_result);
	if (boundary)
		set_buffer_boundary(bh_result);
	return 0;
}

#ifdef CONFIG_FS_DAX
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	unsigned int blkbits = inode->i_blkbits;
	unsigned long first_block = offset >> blkbits;
	unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, first_block, max_blocks,
			&bno, &new, &boundary, flags & IOMAP_WRITE);
	if (ret < 0)
		return ret;

	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = (u64)first_block << blkbits;
	iomap->dax_dev = sbi->s_daxdev;

	if (ret == 0) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64)bno << blkbits;
		iomap->length = (u64)ret << blkbits;
		iomap->flags |= IOMAP_F_MERGED;
	}

	if (new)
		iomap->flags |= IOMAP_F_NEW;
	return 0;
}

static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	if (iomap->type == IOMAP_MAPPED &&
	    written < length &&
	    (flags & IOMAP_WRITE))
		ext2_write_failed(inode->i_mapping, offset + length);
	return 0;
}

const struct iomap_ops ext2_iomap_ops = {
	.iomap_begin		= ext2_iomap_begin,
	.iomap_end		= ext2_iomap_end,
};
#else
/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
const struct iomap_ops ext2_iomap_ops;
#endif /* CONFIG_FS_DAX */

int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}

static int
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ext2_get_block);
}

static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (WARN_ON_ONCE(IS_DAX(inode)))
		return -EIO;

	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
	return ret;
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
#ifdef CONFIG_FS_DAX
	if (dax_mapping(mapping)) {
		return dax_writeback_mapping_range(mapping,
						   mapping->host->i_sb->s_bdev,
						   wbc);
	}
#endif

	return mpage_writepages(mapping, wbc, ext2_get_block);
}

const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
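/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 */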
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
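/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext2_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several indirect
 *	blocks but leave the blocks themselves alive.  An indirect block is
 *	partially truncated if some data below the new i_size is referred from
 *	it (and it is on the path to the first completely truncated data
 *	block).  We atomically detach the top of the truncated branch from the
 *	tree, store its block number in *@top, fill @chain[].bh and @chain[].p
 *	with the buffer_heads of the partially truncated blocks and pointers
 *	to their last surviving elements, and return the pointer to the last
 *	filled element of @chain.  The caller is left to free the detached
 *	subtrees.
 */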
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
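/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */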
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
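/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */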
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
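/* dax_sem must be held when calling this function */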
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	/*
	 * XXX: it seems like a bug here that we don't allow
	 * IS_APPEND inode to have blocks-past-i_size trimmed off.
	 * review and fix this.
	 *
	 * Also would be nice to be able to handle IO errors and such,
	 * but that's probably too much to ask.
	 */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext2_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	dax_sem_down_write(EXT2_I(inode));
	__ext2_truncate_blocks(inode, offset);
	dax_sem_up_write(EXT2_I(inode));
}

static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	inode_dio_wait(inode);

	if (IS_DAX(inode)) {
		error = iomap_zero_range(inode, newsize,
					 PAGE_ALIGN(newsize) - newsize, NULL,
					 &ext2_iomap_ops);
	} else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}

static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}

void ext2_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT2_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
				S_DIRSYNC | S_DAX);
	if (flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT2_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT2_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT2_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT2_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
		inode->i_flags |= S_DAX;
}

struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;

	if (ei->i_file_acl &&
	    !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
		ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
			   ei->i_file_acl);
		brelse(bh);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	if (i_size_read(inode) < 0) {
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}

int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);

	return error;
}