1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/time.h>
27#include <linux/highuid.h>
28#include <linux/pagemap.h>
29#include <linux/dax.h>
30#include <linux/blkdev.h>
31#include <linux/quotaops.h>
32#include <linux/writeback.h>
33#include <linux/buffer_head.h>
34#include <linux/mpage.h>
35#include <linux/fiemap.h>
36#include <linux/iomap.h>
37#include <linux/namei.h>
38#include <linux/uio.h>
39#include "ext2.h"
40#include "acl.h"
41#include "xattr.h"
42
43static int __ext2_write_inode(struct inode *inode, int do_sync);
44
45
46
47
48static inline int ext2_inode_is_fast_symlink(struct inode *inode)
49{
50 int ea_blocks = EXT2_I(inode)->i_file_acl ?
51 (inode->i_sb->s_blocksize >> 9) : 0;
52
53 return (S_ISLNK(inode->i_mode) &&
54 inode->i_blocks - ea_blocks == 0);
55}
56
57static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
58
59static void ext2_write_failed(struct address_space *mapping, loff_t to)
60{
61 struct inode *inode = mapping->host;
62
63 if (to > inode->i_size) {
64 truncate_pagecache(inode, inode->i_size);
65 ext2_truncate_blocks(inode, inode->i_size);
66 }
67}
68
69
70
71
72void ext2_evict_inode(struct inode * inode)
73{
74 struct ext2_block_alloc_info *rsv;
75 int want_delete = 0;
76
77 if (!inode->i_nlink && !is_bad_inode(inode)) {
78 want_delete = 1;
79 dquot_initialize(inode);
80 } else {
81 dquot_drop(inode);
82 }
83
84 truncate_inode_pages_final(&inode->i_data);
85
86 if (want_delete) {
87 sb_start_intwrite(inode->i_sb);
88
89 EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
90 mark_inode_dirty(inode);
91 __ext2_write_inode(inode, inode_needs_sync(inode));
92
93 inode->i_size = 0;
94 if (inode->i_blocks)
95 ext2_truncate_blocks(inode, 0);
96 ext2_xattr_delete_inode(inode);
97 }
98
99 invalidate_inode_buffers(inode);
100 clear_inode(inode);
101
102 ext2_discard_reservation(inode);
103 rsv = EXT2_I(inode)->i_block_alloc_info;
104 EXT2_I(inode)->i_block_alloc_info = NULL;
105 if (unlikely(rsv))
106 kfree(rsv);
107
108 if (want_delete) {
109 ext2_free_inode(inode);
110 sb_end_intwrite(inode->i_sb);
111 }
112}
113
/*
 * One step of an indirect-block chain: where the block number lives,
 * a cached copy of it, and the buffer holding the containing block.
 */
typedef struct {
	__le32	*p;		/* address of the block number */
	__le32	key;		/* cached value of *p */
	struct buffer_head *bh;	/* parent block's buffer; NULL for i_data */
} Indirect;
119
120static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
121{
122 p->key = *(p->p = v);
123 p->bh = bh;
124}
125
126static inline int verify_chain(Indirect *from, Indirect *to)
127{
128 while (from <= to && from->key == *from->p)
129 from++;
130 return (from > to);
131}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (only its superblock matters here)
 *	@i_block: logical block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: if non-NULL, receives the number of direct slots left
 *		before the next indirect-block boundary
 *
 *	Splits a logical block number into the chain of array indices
 *	needed to reach it through the direct / indirect / double /
 *	triple-indirect levels.  Returns the depth of the chain (1..4),
 *	or 0 if the block number is out of range.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		/* one of the EXT2_NDIR_BLOCKS direct slots */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* single indirect */
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* double indirect */
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* triple indirect */
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		/* slots remaining in the last-level block after i_block */
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Walks the chain, reading each indirect block and caching the
 *	pointers in @chain.  Returns NULL when the whole chain is mapped,
 *	otherwise a pointer to the last valid element; *@err is set to
 *	-EIO on a read failure and to -EAGAIN when a concurrent truncate
 *	changed the chain under us (caller is expected to retry).
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed for the first link */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* verify under i_meta_lock that no truncate raced with us */
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	Returns a preferred physical block near @ind: the closest allocated
 *	block to the left in the same pointer block, failing that the block
 *	holding the pointers itself, failing that a pid-coloured spot in
 *	the inode's block group.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It will be referred from the inode itself, so put it into the
	 * inode's own block group; the pid-based colour spreads the
	 * allocations of different processes across the group.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
320
321
322
323
324
325
326
327
328
329
330static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
331 Indirect *partial)
332{
333 struct ext2_block_alloc_info *block_i;
334
335 block_i = EXT2_I(inode)->i_block_alloc_info;
336
337
338
339
340
341 if (block_i && (block == block_i->last_alloc_logical_block + 1)
342 && (block_i->last_alloc_physical_block != 0)) {
343 return block_i->last_alloc_physical_block + 1;
344 }
345
346 return ext2_find_near(inode, partial);
347}
348
349
350
351
352
353
354
355
356
357
358
359
360
361static int
362ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
363 int blocks_to_boundary)
364{
365 unsigned long count = 0;
366
367
368
369
370
371 if (k > 0) {
372
373 if (blks < blocks_to_boundary + 1)
374 count += blks;
375 else
376 count += blocks_to_boundary + 1;
377 return count;
378 }
379
380 count++;
381 while (count < blks && count <= blocks_to_boundary
382 && le32_to_cpu(*(branch[0].p + count)) == 0) {
383 count++;
384 }
385 return count;
386}
387
388
389
390
391
392
393
394
395
396
397
/**
 *	ext2_alloc_blocks - multiple-block allocation helper
 *	@inode: owner
 *	@goal: preferred physical block for the allocation
 *	@indirect_blks: number of missing indirect blocks to allocate first
 *	@blks: number of data blocks the caller would like
 *	@new_blocks: receives one block per missing indirect level, plus
 *		the first block of the contiguous data extent
 *	@err: out-parameter for the error value
 *
 *	Calls ext2_new_blocks() repeatedly until every missing indirect
 *	block has its own block and at least one data block was obtained.
 *	Returns the number of *data* blocks allocated, or 0 with *@err set
 *	on failure (anything already obtained is freed again).
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * The minimum we must obtain is one block per missing indirect
	 * level plus the first data block; we ask for the data blocks
	 * too on a best-effort basis.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* the first allocated blocks fill the indirect slots */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* count > 0 means at least one data block was obtained */
		if (count > 0)
			break;
	}

	/* save the block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of data blocks allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* release whatever was assigned to the indirect levels */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of missing indirect blocks to allocate
 *	@blks: in: data blocks wanted; out: data blocks actually allocated
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	Allocates the blocks, zeroes and links the new indirect blocks and
 *	writes them out (synchronously for DIRSYNC directories).  The new
 *	branch is NOT yet spliced into the tree - branch->key holds its
 *	root, which the caller hands to ext2_splice_branch().  Returns 0
 *	on success or a negative error; on failure everything allocated
 *	here is freed again.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain: store the numbers of the whole run
			 * of data blocks in the last new indirect block.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* only directories need the synchronous write here;
		 * regular files rely on fsync/generic_write_sync */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	/* the buffers were never linked anywhere - just forget them */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	/* i == indirect_blks here: frees the run of data blocks */
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
545
546
547
548
549
550
551
552
553
554
555
556
557
/**
 *	ext2_splice_branch - splice the allocated branch onto inode.
 *	@inode: owner
 *	@block: (logical) number of block we are adding
 *	@where: location of the missing link
 *	@num: number of indirect blocks we are adding
 *	@blks: number of direct blocks we are adding
 *
 *	Fills in the missing link, making the new branch visible in the
 *	block tree, records the extra direct blocks, updates the
 *	reservation info and marks everything dirty.  Caller holds
 *	truncate_mutex.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* That's the missing link - from here the branch is reachable */
	*where->p = where->key;

	/*
	 * If no new indirect block was needed, the extra direct blocks
	 * go straight into the slots following *where->p.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the most recently allocated logical & physical block
	 * so the next sequential allocation can pick a good goal.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* had we spliced it onto an indirect block? Then dirty it */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/*
 * Map/allocate up to @maxblocks contiguous blocks at logical block
 * @iblock.  Allocation strategy: build the whole missing branch before
 * attaching anything to the tree, then set the single missing link under
 * truncate_mutex (which protects the path from truncate).  No special
 * recovery from failed allocations is needed - blocks are simply
 * released, nothing reachable from the inode was touched.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed (hole, create == 0).
 * return < 0, error case.
 *
 * *@new is set when blocks were freshly allocated; *@boundary when the
 * run ends at an indirect-block boundary; *@bno receives the first
 * physical block.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more contiguous blocks while they follow on disk */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now and reread the chain below.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block went missing while reading the chain
	 * (-EAGAIN), or the chain changed before we took the mutex
	 * (concurrent truncate or another get_block allocating this
	 * branch), re-read the chain: the block may already be mapped.
	 * Holding truncate_mutex guarantees the copy we get now stays
	 * valid until we splice our branch in.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* someone else mapped it meanwhile - use that */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily set up the
	 * reservation window info for regular files.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Count the total number of direct blocks to allocate for this
	 * branch (contiguous run up to the boundary).
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * We must unmap blocks before zeroing so that writeback
		 * cannot overwrite zeros with stale data from the block
		 * device page cache.
		 */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * block must be initialised before we put it in the tree
		 * so that it's not found by another thread before it's
		 * initialised
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Clean up and exit: release the whole chain */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}
776
777int ext2_get_block(struct inode *inode, sector_t iblock,
778 struct buffer_head *bh_result, int create)
779{
780 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
781 bool new = false, boundary = false;
782 u32 bno;
783 int ret;
784
785 ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
786 create);
787 if (ret <= 0)
788 return ret;
789
790 map_bh(bh_result, inode->i_sb, bno);
791 bh_result->b_size = (ret << inode->i_blkbits);
792 if (new)
793 set_buffer_new(bh_result);
794 if (boundary)
795 set_buffer_boundary(bh_result);
796 return 0;
797
798}
799
800#ifdef CONFIG_FS_DAX
/*
 * iomap_begin callback for the DAX path: translate [offset, offset+length)
 * into an iomap extent, allocating blocks when IOMAP_WRITE is set.
 */
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	unsigned int blkbits = inode->i_blkbits;
	unsigned long first_block = offset >> blkbits;
	unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
	bool new = false, boundary = false;
	u32 bno;
	int ret;

	/* allocate only on writes; reads map existing blocks or a hole */
	ret = ext2_get_blocks(inode, first_block, max_blocks,
			&bno, &new, &boundary, flags & IOMAP_WRITE);
	if (ret < 0)
		return ret;

	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = (u64)first_block << blkbits;
	iomap->dax_dev = sbi->s_daxdev;

	if (ret == 0) {
		/* hole: report a single unmapped block */
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	} else {
		/* ret is the number of contiguous blocks mapped */
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64)bno << blkbits;
		iomap->length = (u64)ret << blkbits;
		iomap->flags |= IOMAP_F_MERGED;
	}

	if (new)
		iomap->flags |= IOMAP_F_NEW;
	return 0;
}
837
838static int
839ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
840 ssize_t written, unsigned flags, struct iomap *iomap)
841{
842 if (iomap->type == IOMAP_MAPPED &&
843 written < length &&
844 (flags & IOMAP_WRITE))
845 ext2_write_failed(inode->i_mapping, offset + length);
846 return 0;
847}
848
/* iomap operations used by the DAX I/O and zeroing paths */
const struct iomap_ops ext2_iomap_ops = {
	.iomap_begin		= ext2_iomap_begin,
	.iomap_end		= ext2_iomap_end,
};
#else
/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
const struct iomap_ops ext2_iomap_ops;
#endif /* CONFIG_FS_DAX */
857
/* FIEMAP ioctl support, implemented on top of ext2_get_block(). */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}
864
/* Write a single dirty page using the buffer-head writeback path. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}
869
/* Read a single page via the generic multi-page read helper. */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}
874
/* Readahead: read a batch of pages with merged bios where possible. */
static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}
881
882static int
883ext2_write_begin(struct file *file, struct address_space *mapping,
884 loff_t pos, unsigned len, unsigned flags,
885 struct page **pagep, void **fsdata)
886{
887 int ret;
888
889 ret = block_write_begin(mapping, pos, len, flags, pagep,
890 ext2_get_block);
891 if (ret < 0)
892 ext2_write_failed(mapping, pos + len);
893 return ret;
894}
895
896static int ext2_write_end(struct file *file, struct address_space *mapping,
897 loff_t pos, unsigned len, unsigned copied,
898 struct page *page, void *fsdata)
899{
900 int ret;
901
902 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
903 if (ret < len)
904 ext2_write_failed(mapping, pos + len);
905 return ret;
906}
907
/* write_begin for the "nobh" mount option: no buffer heads attached. */
static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       ext2_get_block);
	/* trim blocks instantiated beyond i_size if we failed */
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
}
921
/* writepage for the "nobh" mount option. */
static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}
927
/* Logical-to-physical block mapping for the FIBMAP ioctl and swapfiles. */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}
932
/*
 * Classic (non-DAX) direct I/O through the block device.  On a failed
 * or short write, blocks instantiated past i_size are trimmed back.
 */
static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
	return ret;
}
948
/* Write back a range of dirty pages with merged bios. */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}
954
/* Writeback for DAX mappings: flush CPU caches for dirty DAX ranges. */
static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}
961
/* Default address_space operations (buffer-head based). */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
975
/* address_space operations for the "nobh" mount option. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
988
/* address_space operations for DAX inodes: no page cache involvement. */
static const struct address_space_operations ext2_dax_aops = {
	.writepages		= ext2_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};
995
996
997
998
999
1000
1001static inline int all_zeroes(__le32 *p, __le32 *q)
1002{
1003 while (p < q)
1004 if (*p++)
1005 return 0;
1006 return 1;
1007}
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the doomed branch
 *
 *	Finds where the branch to be killed splits from the part that must
 *	survive, detaches the doomed subtree under i_meta_lock (*@top
 *	receives its root, or 0 if nothing to kill) and returns the last
 *	element of @chain that stays shared.  Buffers above the split
 *	point are released before returning.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* skip trailing zero offsets: those levels are wholly removed */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of
	 * our branch should be detached before unlocking.  However, if
	 * that rest of branch is all ours and does not grow immediately
	 * from the inode it's easier to cheat and just decrement
	 * partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	points immediately past the end of array
 *
 *	Frees the blocks in [p, q), zeroing the slots as it goes.  Runs of
 *	physically contiguous blocks are accumulated and freed with a
 *	single ext2_free_blocks() call.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous;
			 * the goto enters the "start a new run" branch */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	/* flush the final pending run, if any */
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	Recursively frees whole subtrees: for depth > 0 each entry is an
 *	indirect block whose children are freed first, then the block
 *	itself; at depth 0 the entries are plain data blocks handed to
 *	ext2_free_data().
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure?  Report it and carry on with the
			 * remaining entries (the subtree below is lost).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			/* the block is being freed: discard, don't write */
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1176
1177
/*
 * Free all blocks of the inode beyond byte @offset (rounded up to a
 * block boundary).  dax_sem must be held by the caller; truncate_mutex
 * is taken here to block out concurrent ext2_get_block() allocations.
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	/* first block wholly beyond the new size */
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* cut point is in the direct blocks */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the detached branch (already unlinked) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
			/* fall through */
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
			/* fall through */
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
			/* fall through */
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
1266
1267static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1268{
1269 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1270 S_ISLNK(inode->i_mode)))
1271 return;
1272 if (ext2_inode_is_fast_symlink(inode))
1273 return;
1274
1275 dax_sem_down_write(EXT2_I(inode));
1276 __ext2_truncate_blocks(inode, offset);
1277 dax_sem_up_write(EXT2_I(inode));
1278}
1279
/*
 * Change the file size to @newsize: zero the partial block at the new
 * EOF, update i_size and free the truncated blocks.  Returns 0 or a
 * negative error.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* wait for in-flight direct I/O before changing the size */
	inode_dio_wait(inode);

	/* zero the tail of the (possibly partial) last block */
	if (IS_DAX(inode)) {
		error = iomap_zero_range(inode, newsize,
					 PAGE_ALIGN(newsize) - newsize, NULL,
					 &ext2_iomap_ops);
	} else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	/* block DAX faults while the block tree changes */
	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
1322
/*
 * Map an inode number to its on-disk struct ext2_inode.  On success
 * *p holds the buffer_head the caller must brelse(); on failure an
 * ERR_PTR is returned and *p is left NULL.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the block within the group's inode table and the
	 * byte offset of the inode inside that block.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
1365
1366void ext2_set_inode_flags(struct inode *inode)
1367{
1368 unsigned int flags = EXT2_I(inode)->i_flags;
1369
1370 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
1371 S_DIRSYNC | S_DAX);
1372 if (flags & EXT2_SYNC_FL)
1373 inode->i_flags |= S_SYNC;
1374 if (flags & EXT2_APPEND_FL)
1375 inode->i_flags |= S_APPEND;
1376 if (flags & EXT2_IMMUTABLE_FL)
1377 inode->i_flags |= S_IMMUTABLE;
1378 if (flags & EXT2_NOATIME_FL)
1379 inode->i_flags |= S_NOATIME;
1380 if (flags & EXT2_DIRSYNC_FL)
1381 inode->i_flags |= S_DIRSYNC;
1382 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
1383 inode->i_flags |= S_DAX;
1384}
1385
1386void ext2_set_file_ops(struct inode *inode)
1387{
1388 inode->i_op = &ext2_file_inode_operations;
1389 inode->i_fop = &ext2_file_operations;
1390 if (IS_DAX(inode))
1391 inode->i_mapping->a_ops = &ext2_dax_aops;
1392 else if (test_opt(inode->i_sb, NOBH))
1393 inode->i_mapping->a_ops = &ext2_nobh_aops;
1394 else
1395 inode->i_mapping->a_ops = &ext2_aops;
1396}
1397
1398struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1399{
1400 struct ext2_inode_info *ei;
1401 struct buffer_head * bh;
1402 struct ext2_inode *raw_inode;
1403 struct inode *inode;
1404 long ret = -EIO;
1405 int n;
1406 uid_t i_uid;
1407 gid_t i_gid;
1408
1409 inode = iget_locked(sb, ino);
1410 if (!inode)
1411 return ERR_PTR(-ENOMEM);
1412 if (!(inode->i_state & I_NEW))
1413 return inode;
1414
1415 ei = EXT2_I(inode);
1416 ei->i_block_alloc_info = NULL;
1417
1418 raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
1419 if (IS_ERR(raw_inode)) {
1420 ret = PTR_ERR(raw_inode);
1421 goto bad_inode;
1422 }
1423
1424 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
1425 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
1426 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
1427 if (!(test_opt (inode->i_sb, NO_UID32))) {
1428 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
1429 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
1430 }
1431 i_uid_write(inode, i_uid);
1432 i_gid_write(inode, i_gid);
1433 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
1434 inode->i_size = le32_to_cpu(raw_inode->i_size);
1435 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
1436 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
1437 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
1438 inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
1439 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
1440
1441
1442
1443
1444
1445 if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
1446
1447 brelse (bh);
1448 ret = -ESTALE;
1449 goto bad_inode;
1450 }
1451 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
1452 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1453 ext2_set_inode_flags(inode);
1454 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
1455 ei->i_frag_no = raw_inode->i_frag;
1456 ei->i_frag_size = raw_inode->i_fsize;
1457 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
1458 ei->i_dir_acl = 0;
1459
1460 if (ei->i_file_acl &&
1461 !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
1462 ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
1463 ei->i_file_acl);
1464 brelse(bh);
1465 ret = -EFSCORRUPTED;
1466 goto bad_inode;
1467 }
1468
1469 if (S_ISREG(inode->i_mode))
1470 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
1471 else
1472 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
1473 if (i_size_read(inode) < 0) {
1474 ret = -EFSCORRUPTED;
1475 goto bad_inode;
1476 }
1477 ei->i_dtime = 0;
1478 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1479 ei->i_state = 0;
1480 ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
1481 ei->i_dir_start_lookup = 0;
1482
1483
1484
1485
1486
1487 for (n = 0; n < EXT2_N_BLOCKS; n++)
1488 ei->i_data[n] = raw_inode->i_block[n];
1489
1490 if (S_ISREG(inode->i_mode)) {
1491 ext2_set_file_ops(inode);
1492 } else if (S_ISDIR(inode->i_mode)) {
1493 inode->i_op = &ext2_dir_inode_operations;
1494 inode->i_fop = &ext2_dir_operations;
1495 if (test_opt(inode->i_sb, NOBH))
1496 inode->i_mapping->a_ops = &ext2_nobh_aops;
1497 else
1498 inode->i_mapping->a_ops = &ext2_aops;
1499 } else if (S_ISLNK(inode->i_mode)) {
1500 if (ext2_inode_is_fast_symlink(inode)) {
1501 inode->i_link = (char *)ei->i_data;
1502 inode->i_op = &ext2_fast_symlink_inode_operations;
1503 nd_terminate_link(ei->i_data, inode->i_size,
1504 sizeof(ei->i_data) - 1);
1505 } else {
1506 inode->i_op = &ext2_symlink_inode_operations;
1507 inode_nohighmem(inode);
1508 if (test_opt(inode->i_sb, NOBH))
1509 inode->i_mapping->a_ops = &ext2_nobh_aops;
1510 else
1511 inode->i_mapping->a_ops = &ext2_aops;
1512 }
1513 } else {
1514 inode->i_op = &ext2_special_inode_operations;
1515 if (raw_inode->i_block[0])
1516 init_special_inode(inode, inode->i_mode,
1517 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
1518 else
1519 init_special_inode(inode, inode->i_mode,
1520 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1521 }
1522 brelse (bh);
1523 unlock_new_inode(inode);
1524 return inode;
1525
1526bad_inode:
1527 iget_failed(inode);
1528 return ERR_PTR(ret);
1529}
1530
/*
 * Serialize an in-core inode back into its on-disk ext2 raw inode and
 * mark the containing buffer dirty.  If @do_sync is set, the buffer is
 * flushed synchronously and I/O errors are reported.
 *
 * Returns 0 on success or -EIO if the raw inode could not be mapped or
 * a synchronous write failed.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/*
	 * For fields not tracked in the in-memory inode, initialise them
	 * to zero for new inodes so no stale on-disk data leaks through.
	 */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact.
 * (Only write the high bits while the inode is live, i.e. i_dtime == 0.)
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		/* 16-bit uid/gid mode: squash ids that don't fit. */
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* Regular files reuse i_dir_acl as the high 32 size bits. */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
			       /*
				* If this is the first large file created,
				* add the large-file feature flag to the
				* superblock (under s_lock) and sync it out.
				*/
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Old-style dev numbers go in block[0], new-style in block[1]. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		/* buffer_req set but not uptodate => the write I/O failed. */
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1634
1635int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1636{
1637 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1638}
1639
1640int ext2_getattr(const struct path *path, struct kstat *stat,
1641 u32 request_mask, unsigned int query_falgs)
1642{
1643 struct inode *inode = d_inode(path->dentry);
1644 struct ext2_inode_info *ei = EXT2_I(inode);
1645 unsigned int flags;
1646
1647 flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
1648 if (flags & EXT2_APPEND_FL)
1649 stat->attributes |= STATX_ATTR_APPEND;
1650 if (flags & EXT2_COMPR_FL)
1651 stat->attributes |= STATX_ATTR_COMPRESSED;
1652 if (flags & EXT2_IMMUTABLE_FL)
1653 stat->attributes |= STATX_ATTR_IMMUTABLE;
1654 if (flags & EXT2_NODUMP_FL)
1655 stat->attributes |= STATX_ATTR_NODUMP;
1656 stat->attributes_mask |= (STATX_ATTR_APPEND |
1657 STATX_ATTR_COMPRESSED |
1658 STATX_ATTR_ENCRYPTED |
1659 STATX_ATTR_IMMUTABLE |
1660 STATX_ATTR_NODUMP);
1661
1662 generic_fillattr(inode, stat);
1663 return 0;
1664}
1665
1666int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1667{
1668 struct inode *inode = d_inode(dentry);
1669 int error;
1670
1671 error = setattr_prepare(dentry, iattr);
1672 if (error)
1673 return error;
1674
1675 if (is_quota_modification(inode, iattr)) {
1676 error = dquot_initialize(inode);
1677 if (error)
1678 return error;
1679 }
1680 if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
1681 (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
1682 error = dquot_transfer(inode, iattr);
1683 if (error)
1684 return error;
1685 }
1686 if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
1687 error = ext2_setsize(inode, iattr->ia_size);
1688 if (error)
1689 return error;
1690 }
1691 setattr_copy(inode, iattr);
1692 if (iattr->ia_valid & ATTR_MODE)
1693 error = posix_acl_chmod(inode, inode->i_mode);
1694 mark_inode_dirty(inode);
1695
1696 return error;
1697}
1698