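/*
 * inode.c - ext2 inode handling: reading and writing on-disk inodes, the
 * indirect block-mapping machinery (get_block, block allocation, truncate),
 * and the address_space operations for the buffered, nobh and DAX paths.
 */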
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);
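
/*
 * Test whether an inode is a fast symlink: the link target is stored in
 * i_data itself, so the only blocks charged to the inode (if any) belong
 * to an extended attribute block.
 */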
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}
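
/*
 * Called at the last iput() if i_nlink is zero.
 */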
void ext2_evict_inode(struct inode *inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		sb_start_intwrite(inode->i_sb);
		/* set dtime */
		EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* truncate to 0 */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
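
/*
 * ext2_block_to_path - parse the block number into an array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: if non-NULL, set to the number of further logical blocks
 *	that can be mapped through the same final indirect block
 *
 * Translates a logical block number into a chain of up to four offsets:
 * a slot in i_data for direct blocks, or the EXT2_{IND,DIND,TIND}_BLOCK
 * slot followed by offsets within the (double/triple) indirect blocks.
 * Returns the depth of the chain, or 0 on error (block out of range).
 */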
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
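
/*
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Fills in @chain by walking @offsets and reading each indirect block in
 * turn. Returns NULL when the whole chain is mapped; otherwise a pointer
 * to the last incomplete Indirect (a hole or the point of failure). On
 * failure *err is -EIO for a read error, or -EAGAIN if the chain changed
 * under us (a pointer was modified concurrently); it stays 0 for a plain
 * hole.
 */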
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;

	add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
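
/*
 * ext2_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block
 *
 * Returns a preferred place (goal) for a new block: first, the nearest
 * already-allocated block in the same indirect block; failing that, the
 * indirect block itself; failing that, the start of the inode's block
 * group, shifted by a PID-derived "colour" so that concurrent allocators
 * spread out across the group.
 */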
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
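
/*
 * ext2_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Returns the preferred goal block: if this allocation directly continues
 * the previous one (per the per-inode allocation info), the block right
 * after it; otherwise whatever ext2_find_near() suggests.
 */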
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
{
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/*
	 * Try the heuristic for sequential allocation, failing that at
	 * least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}
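
/*
 * ext2_blks_to_allocate - count the number of direct blocks that need
 * to be allocated for the given branch.
 * @branch: chain of indirect blocks
 * @k: number of indirect blocks still missing in the branch
 * @blks: number of data blocks to be mapped
 * @blocks_to_boundary: number of blocks left to the indirect-block boundary
 *
 * Returns the total number of data blocks to be allocated, capped at the
 * boundary of the final indirect block.
 */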
static int
ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		      int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [td]indirect blocks have not been allocated
	 * yet, so the data blocks on this path cannot exist either.
	 */
	if (k > 0) {
		/* right now we don't handle cross-boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
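
/*
 * ext2_alloc_blocks - allocate the required indirect blocks and a run
 * of direct blocks.
 * @inode: owner
 * @goal: preferred physical block for the allocation
 * @indirect_blks: number of missing indirect blocks in the branch
 * @blks: number of data blocks requested
 * @new_blocks: on return, the @indirect_blks metadata block numbers
 *	followed by the first direct block number
 * @err: error pointer
 *
 * Returns the number of direct blocks actually allocated (at least 1 on
 * success); on failure returns 0, sets *err and frees anything that was
 * allocated along the way.
 */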
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis. To build a branch, we should allocate
	 * blocks for all the indirect blocks first; the rest then goes
	 * to the direct blocks.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* store the first allocated block numbers as indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of data blocks allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
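
/*
 * ext2_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of indirect blocks to allocate
 * @blks: in/out - number of direct blocks requested/allocated
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next
 * @branch: place to store the chain in
 *
 * Allocates the blocks, zero-fills each new indirect block, links them
 * into a chain and marks the buffers dirty (for dirsync directories each
 * indirect block is synced immediately). The final link that makes the
 * whole branch visible is set later by ext2_splice_branch(). On failure
 * everything allocated here is freed again.
 */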
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to the new one, then send parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain: update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers.
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/*
		 * We used to sync bh here if IS_SYNC(inode). But we now
		 * rely upon generic_write_sync() and b_inode_buffers --
		 * except for directories with dirsync enabled.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
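
/*
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * Fills the missing link and does the housekeeping: the store of
 * where->key is what makes the whole new branch visible to readers.
 * Caller holds truncate_mutex.
 */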
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* That's it: make the branch visible */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just
	 * allocated direct blocks.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * Update the most recently allocated logical & physical block in
	 * i_block_alloc_info, to assist finding the proper goal block for
	 * the next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto an indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
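
/*
 * ext2_get_blocks - map a range of logical blocks, allocating if needed
 *
 * Maps up to @maxblocks contiguous blocks starting at @iblock. Returns
 * the number of blocks mapped (<= maxblocks), 0 if the range is a hole
 * and @create is zero, or a negative error. On fresh allocation *new is
 * set so callers can mark the buffer new; *boundary is set when the
 * mapping ends at an indirect-block boundary. Allocation is serialized
 * by truncate_mutex; lookup is lock-free and retried on -EAGAIN.
 */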
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * The indirect block might have been
				 * removed by truncate while we were
				 * reading it. Forget what we have got
				 * now and go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading the chain
	 * (ext2_get_branch() returns with a partial chain even when it
	 * spots a hole), it could be caused either by a genuine hole or
	 * by the chain having been modified under us by truncate. Re-read
	 * the chain under truncate_mutex to tell the two cases apart.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/*
	 * Okay, we need to do block allocation. Lazily initialize the
	 * block allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next, look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);

	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * We must unmap blocks before zeroing so that writeback
		 * cannot overwrite zeros with stale data from the block
		 * device page cache.
		 */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * The block must be initialised before we put it in the
		 * tree so that it's not found by another thread before
		 * it's initialised.
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}

int ext2_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
			create);
	if (ret <= 0)
		return ret;

	map_bh(bh_result, inode->i_sb, bno);
	bh_result->b_size = (ret << inode->i_blkbits);
	if (new)
		set_buffer_new(bh_result);
	if (boundary)
		set_buffer_boundary(bh_result);
	return 0;
}

#ifdef CONFIG_FS_DAX
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	unsigned int blkbits = inode->i_blkbits;
	unsigned long first_block = offset >> blkbits;
	unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	ret = ext2_get_blocks(inode, first_block, max_blocks,
			&bno, &new, &boundary, flags & IOMAP_WRITE);
	if (ret < 0)
		return ret;

	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = (u64)first_block << blkbits;
	iomap->dax_dev = sbi->s_daxdev;

	if (ret == 0) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64)bno << blkbits;
		iomap->length = (u64)ret << blkbits;
		iomap->flags |= IOMAP_F_MERGED;
	}

	if (new)
		iomap->flags |= IOMAP_F_NEW;
	return 0;
}

static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	if (iomap->type == IOMAP_MAPPED &&
	    written < length &&
	    (flags & IOMAP_WRITE))
		ext2_write_failed(inode->i_mapping, offset + length);
	return 0;
}

const struct iomap_ops ext2_iomap_ops = {
	.iomap_begin		= ext2_iomap_begin,
	.iomap_end		= ext2_iomap_end,
};
#else
/* Define empty ops for the !CONFIG_FS_DAX case to avoid ugly ifdefs */
const struct iomap_ops ext2_iomap_ops;
#endif /* CONFIG_FS_DAX */

int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}

static int
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}

static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ext2_get_block);
}

static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
	return ret;
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}

static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}

const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext2_dax_aops = {
	.writepages		= ext2_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};
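
/*
 * Returns 1 if the region of little-endian words [p, q) is all zeroes,
 * 0 otherwise. Used by truncate to decide whether an indirect block has
 * become entirely free.
 */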
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
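
/*
 * ext2_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext2_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to put the detached top of branch
 *
 * Walks the branch to the truncation point and finds the topmost subtree
 * that lies entirely within the truncated region and is shared with
 * nothing that must survive. The pointer to that subtree is detached
 * (stored in *top and zeroed in its parent) under i_meta_lock, so
 * concurrent readers see either the whole old branch or none of it.
 * Returns the last surviving Indirect in @chain; the caller frees
 * everything hanging off *top and past the returned partial->p.
 */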
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((__le32*)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of
	 * our branch should be detached before unlocking. However, if
	 * that rest of branch is all ours and does not grow immediately
	 * from the inode it's easier to cheat and just decrement
	 * partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
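
/*
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: points immediately past the end of array
 *
 * Frees the blocks pointed to by [p, q), zeroing the pointers as it goes
 * and coalescing runs of contiguous blocks into single
 * ext2_free_blocks() calls.
 */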
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks(inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		ext2_free_blocks(inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
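
/*
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * Recursively frees @depth levels of indirect blocks and, at depth 0,
 * the data blocks themselves, zeroing each pointer before freeing what
 * it refers to.
 */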
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}

/* dax_sem must be held when calling this function */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT2_IND_BLOCK];
		if (nr) {
			i_data[EXT2_IND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 1);
		}
		/* fall through */
	case EXT2_IND_BLOCK:
		nr = i_data[EXT2_DIND_BLOCK];
		if (nr) {
			i_data[EXT2_DIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 2);
		}
		/* fall through */
	case EXT2_DIND_BLOCK:
		nr = i_data[EXT2_TIND_BLOCK];
		if (nr) {
			i_data[EXT2_TIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 3);
		}
		/* fall through */
	case EXT2_TIND_BLOCK:
		;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext2_inode_is_fast_symlink(inode))
		return;

	dax_sem_down_write(EXT2_I(inode));
	__ext2_truncate_blocks(inode, offset);
	dax_sem_up_write(EXT2_I(inode));
}

static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	inode_dio_wait(inode);

	if (IS_DAX(inode)) {
		error = iomap_zero_range(inode, newsize,
					 PAGE_ALIGN(newsize) - newsize, NULL,
					 &ext2_iomap_ops);
	} else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}

static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head *bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc *gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}

void ext2_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT2_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
				S_DIRSYNC | S_DAX);
	if (flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT2_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT2_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT2_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT2_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
		inode->i_flags |= S_DAX;
}

void ext2_set_file_ops(struct inode *inode)
{
	inode->i_op = &ext2_file_inode_operations;
	inode->i_fop = &ext2_file_operations;
	if (IS_DAX(inode))
		inode->i_mapping->a_ops = &ext2_dax_aops;
	else if (test_opt(inode->i_sb, NOBH))
		inode->i_mapping->a_ops = &ext2_nobh_aops;
	else
		inode->i_mapping->a_ops = &ext2_aops;
}

struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head *bh = NULL;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * We now have enough fields to check if the inode was active or
	 * not. This is needed because nfsd might try to access dead
	 * inodes; the test is the same one that e2fsck uses.
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext2_set_inode_flags(inode);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;

	if (ei->i_file_acl &&
	    !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
		ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
			   ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	if (i_size_read(inode) < 0) {
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block
	 * numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		ext2_set_file_ops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head *bh;
	struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/*
				 * If this is the first large file created,
				 * add a flag to the superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse(bh);
	return err;
}

int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ext2_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext2_inode_info *ei = EXT2_I(inode);
	unsigned int flags;

	flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
	if (flags & EXT2_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT2_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT2_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT2_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_COMPRESSED |
			STATX_ATTR_ENCRYPTED |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}

int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);

	return error;
}