1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/time.h>
27#include <linux/highuid.h>
28#include <linux/pagemap.h>
29#include <linux/dax.h>
30#include <linux/blkdev.h>
31#include <linux/quotaops.h>
32#include <linux/writeback.h>
33#include <linux/buffer_head.h>
34#include <linux/mpage.h>
35#include <linux/fiemap.h>
36#include <linux/iomap.h>
37#include <linux/namei.h>
38#include <linux/uio.h>
39#include "ext2.h"
40#include "acl.h"
41#include "xattr.h"
42
43static int __ext2_write_inode(struct inode *inode, int do_sync);
44
45
46
47
48static inline int ext2_inode_is_fast_symlink(struct inode *inode)
49{
50 int ea_blocks = EXT2_I(inode)->i_file_acl ?
51 (inode->i_sb->s_blocksize >> 9) : 0;
52
53 return (S_ISLNK(inode->i_mode) &&
54 inode->i_blocks - ea_blocks == 0);
55}
56
57static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
58
59static void ext2_write_failed(struct address_space *mapping, loff_t to)
60{
61 struct inode *inode = mapping->host;
62
63 if (to > inode->i_size) {
64 truncate_pagecache(inode, inode->i_size);
65 ext2_truncate_blocks(inode, inode->i_size);
66 }
67}
68
69
70
71
/*
 * Called by the VFS when the inode is dropped from the inode cache.  If
 * the link count is zero the on-disk inode is released as well: dtime is
 * recorded and written, then data blocks, the xattr block and finally the
 * inode itself are freed.  Otherwise only the in-memory state (quota
 * reference, reservation window, buffers) is torn down.
 */
void ext2_evict_inode(struct inode * inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		/* Quota must be attached before blocks are released below. */
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		/* Keep the filesystem writable until the delete completes. */
		sb_start_intwrite(inode->i_sb);
		/*
		 * Record the deletion time and push the inode to disk before
		 * freeing anything, so a crash mid-delete leaves a dtime'd
		 * (recognizably dead) inode rather than a live-looking one.
		 */
		EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));

		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	/* Drop the reservation window and its bookkeeping structure. */
	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}
113
/*
 * One step of a walk down the indirect-block chain: @p points at the slot
 * holding the next block number, @key caches the value read from it, and
 * @bh is the buffer that slot lives in (NULL while the slot is still in
 * the inode's own i_data array).
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
119
/* Record one chain step: remember the slot, cache its current value. */
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
125
126static inline int verify_chain(Indirect *from, Indirect *to)
127{
128 while (from <= to && from->key == *from->p)
129 from++;
130 return (from > to);
131}
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/**
 *	ext2_block_to_path - parse a file block number into chain offsets
 *	@inode: inode in question
 *	@i_block: file-relative block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: if non-NULL, set to the number of further blocks that can
 *		be mapped through the same final (in)direct block
 *
 *	Splits the block number into the sequence of array indices to follow
 *	from the inode's i_data through the indirect blocks: direct, single,
 *	double or triple indirect.  Returns the depth of the chain (1..4),
 *	or 0 after logging a warning when the block number is out of range.
 *
 *	Note the successive `i_block -= ...` rebasing: in each branch
 *	i_block is relative to the start of that indirection level.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	/* Distance from the last offset to the end of its map block. */
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Returns NULL when the whole chain was read successfully (data block
 *	number is in chain[depth-1].key) or a pointer to the last filled
 *	triple when the walk stopped early.  In the latter case *@err is:
 *	0 if the next pointer was simply a hole, -EAGAIN if the chain
 *	changed under us (caller should retry), -EIO on a read failure.
 *
 *	i_meta_lock (read side) protects the verify-then-extend step
 *	against a concurrent truncate rewriting the chain.
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* The first step is always in the inode itself. */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
295{
296 struct ext2_inode_info *ei = EXT2_I(inode);
297 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
298 __le32 *p;
299 ext2_fsblk_t bg_start;
300 ext2_fsblk_t colour;
301
302
303 for (p = ind->p - 1; p >= start; p--)
304 if (*p)
305 return le32_to_cpu(*p);
306
307
308 if (ind->bh)
309 return ind->bh->b_blocknr;
310
311
312
313
314
315 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
316 colour = (current->pid % 16) *
317 (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
318 return bg_start + colour;
319}
320
321
322
323
324
325
326
327
328
329
330static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
331 Indirect *partial)
332{
333 struct ext2_block_alloc_info *block_i;
334
335 block_i = EXT2_I(inode)->i_block_alloc_info;
336
337
338
339
340
341 if (block_i && (block == block_i->last_alloc_logical_block + 1)
342 && (block_i->last_alloc_physical_block != 0)) {
343 return block_i->last_alloc_physical_block + 1;
344 }
345
346 return ext2_find_near(inode, partial);
347}
348
349
350
351
352
353
354
355
356
357
358
359
360static int
361ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
362 int blocks_to_boundary)
363{
364 unsigned long count = 0;
365
366
367
368
369
370 if (k > 0) {
371
372 if (blks < blocks_to_boundary + 1)
373 count += blks;
374 else
375 count += blocks_to_boundary + 1;
376 return count;
377 }
378
379 count++;
380 while (count < blks && count <= blocks_to_boundary
381 && le32_to_cpu(*(branch[0].p + count)) == 0) {
382 count++;
383 }
384 return count;
385}
386
387
388
389
390
391
392
393
394
/*
 * ext2_alloc_blocks - grab the @indirect_blks missing metadata blocks plus
 * a contiguous run of up to @blks data blocks near @goal.
 *
 * On success returns the number of data blocks obtained; new_blocks[0 ..
 * indirect_blks-1] hold the indirect blocks and new_blocks[indirect_blks]
 * the first block of the contiguous data run.  On failure returns 0 with
 * *err set, after releasing anything already allocated.
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * We must deliver every missing indirect block plus at least one
	 * data block, so keep asking the allocator until the runs obtained
	 * cover the indirect blocks with something left over.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocate as much of the remainder as possible in one run */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* satisfy still-missing indirect blocks from this run first */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* anything left in the run is the start of the data blocks */
		if (count > 0)
			break;
	}

	/* first data block of the final contiguous run */
	new_blocks[index] = current_block;

	/* number of data blocks actually allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* roll back the indirect blocks grabbed in earlier iterations */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
/*
 * ext2_alloc_branch - allocate and set up a chain of blocks
 * @inode: owner
 * @indirect_blks: number of indirect blocks missing on the path
 * @blks: in: requested data blocks; out: data blocks actually allocated
 * @goal: preferred physical block for the allocation
 * @offsets: offsets within the missing part of the indirect chain
 * @branch: place to store the keys/buffers of the new blocks
 *
 * Allocates the blocks, then zeroes each new indirect block and links it
 * to the next one, recording everything in @branch.  The branch is built
 * entirely off to the side: it is NOT yet reachable from the inode —
 * the caller splices it in later (ext2_splice_branch()).  On failure all
 * new blocks are released and a negative errno is returned.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * Wire up each new indirect block: zero it, point its slot at the
	 * next new block, and push it to disk.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * Bottom of the chain: fill the last indirect block
			 * with the rest of the contiguous data-block run.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/*
		 * Dirsync directories need their metadata on disk before
		 * the operation returns.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	/* Forget the buffers created so far and free every new block;
	 * after the loop i == indirect_blks, so the last call releases
	 * the run of num data blocks. */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
544
545
546
547
548
549
550
551
552
553
554
555
556
/*
 * ext2_splice_branch - splice the allocated branch onto the inode.
 * @inode: owner
 * @block: file-relative number of the first new data block
 * @where: location of the missing link (slot to fill in)
 * @num: number of indirect blocks in the new branch (0 = direct splice)
 * @blks: number of data blocks just allocated
 *
 * Makes the new branch visible by filling the empty slot, records the
 * allocation in the reservation window hint and dirties the containing
 * buffer (or the inode itself when the slot is in i_data).  Caller is
 * expected to hold ei->truncate_mutex (see ext2_get_blocks()).
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* This is the actual splice: the branch becomes reachable here. */
	*where->p = where->key;

	/*
	 * No new indirect blocks were needed, so the extra data blocks go
	 * into the slots directly following the spliced one.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the last logical/physical block allocated so the next
	 * sequential allocation can continue right after it.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* Had we spliced it onto an indirect block, dirty that buffer. */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
/*
 * ext2_get_blocks - map a run of file blocks to disk blocks.
 *
 * Maps up to @maxblocks blocks starting at logical block @iblock.  On
 * success returns the number of contiguous blocks mapped and stores the
 * first physical block in *@bno.  Returns 0 for a hole when @create is
 * false, or a negative errno.  *@new is set when fresh blocks were
 * allocated, *@boundary when the run ends at the last slot of its map
 * block.  Allocation is serialized against truncate and other allocators
 * by ei->truncate_mutex; lock-free lookups are revalidated with
 * verify_chain() and retried on -EAGAIN.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block already mapped, no allocation needed. */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* Map further blocks while they remain contiguous. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * The indirect block may have been removed
				 * by truncate while we were reading it:
				 * forget what we have and re-read under the
				 * mutex below.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Plain lookup of a hole, or a failed read of an indirect block. */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the chain raced with a truncate or another allocator (either
	 * ext2_get_branch() returned -EAGAIN above, or the chain no longer
	 * verifies), drop it and re-read.  Since truncate and other
	 * get_block callers are now blocked by the mutex, the re-read copy
	 * stays current until we splice the new branch in.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* Someone else mapped it meanwhile: one block done. */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/*
	 * We need to allocate.  Set up the reservation window (regular
	 * files only) and pick a goal near the point of the failed lookup.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* Number of [d,t]indirect blocks missing on the path. */
	indirect_blks = (chain + depth) - partial - 1;

	/* How many direct blocks we may allocate for this branch. */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);

	/* Build the missing branch off to the side (not yet visible). */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * Unmap any block-device page cache aliases before zeroing
		 * so writeback cannot overwrite our zeros with stale data.
		 */
		clean_bdev_aliases(inode->i_sb->s_bdev,
				   le32_to_cpu(chain[depth-1].key),
				   count);
		/*
		 * The blocks must be zeroed before being put in the tree,
		 * so another thread cannot observe uninitialised contents.
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}
	*new = true;

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	if (count > blocks_to_boundary)
		*boundary = true;
	err = count;
	/* Release the whole chain on the way out. */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	if (err > 0)
		*bno = le32_to_cpu(chain[depth-1].key);
	return err;
}
778
779int ext2_get_block(struct inode *inode, sector_t iblock,
780 struct buffer_head *bh_result, int create)
781{
782 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
783 bool new = false, boundary = false;
784 u32 bno;
785 int ret;
786
787 ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
788 create);
789 if (ret <= 0)
790 return ret;
791
792 map_bh(bh_result, inode->i_sb, bno);
793 bh_result->b_size = (ret << inode->i_blkbits);
794 if (new)
795 set_buffer_new(bh_result);
796 if (boundary)
797 set_buffer_boundary(bh_result);
798 return 0;
799
800}
801
802#ifdef CONFIG_FS_DAX
803static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
804 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
805{
806 unsigned int blkbits = inode->i_blkbits;
807 unsigned long first_block = offset >> blkbits;
808 unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
809 struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
810 bool new = false, boundary = false;
811 u32 bno;
812 int ret;
813
814 ret = ext2_get_blocks(inode, first_block, max_blocks,
815 &bno, &new, &boundary, flags & IOMAP_WRITE);
816 if (ret < 0)
817 return ret;
818
819 iomap->flags = 0;
820 iomap->bdev = inode->i_sb->s_bdev;
821 iomap->offset = (u64)first_block << blkbits;
822 iomap->dax_dev = sbi->s_daxdev;
823
824 if (ret == 0) {
825 iomap->type = IOMAP_HOLE;
826 iomap->addr = IOMAP_NULL_ADDR;
827 iomap->length = 1 << blkbits;
828 } else {
829 iomap->type = IOMAP_MAPPED;
830 iomap->addr = (u64)bno << blkbits;
831 iomap->length = (u64)ret << blkbits;
832 iomap->flags |= IOMAP_F_MERGED;
833 }
834
835 if (new)
836 iomap->flags |= IOMAP_F_NEW;
837 return 0;
838}
839
840static int
841ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
842 ssize_t written, unsigned flags, struct iomap *iomap)
843{
844 if (iomap->type == IOMAP_MAPPED &&
845 written < length &&
846 (flags & IOMAP_WRITE))
847 ext2_write_failed(inode->i_mapping, offset + length);
848 return 0;
849}
850
const struct iomap_ops ext2_iomap_ops = {
	.iomap_begin		= ext2_iomap_begin,
	.iomap_end		= ext2_iomap_end,
};
#else
/* Zero-filled stub so callers link when CONFIG_FS_DAX is disabled. */
const struct iomap_ops ext2_iomap_ops;
#endif
859
/*
 * FIEMAP ioctl: report the file's extents by walking the block map with
 * ext2_get_block through the generic helper.
 */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}
866
/* Write one page through the buffer-head based path. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}

/* Read one page; mpage batches the block lookups into large bios. */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

/* Readahead: map and submit a batch of pages in one go. */
static void ext2_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, ext2_get_block);
}
881
882static int
883ext2_write_begin(struct file *file, struct address_space *mapping,
884 loff_t pos, unsigned len, unsigned flags,
885 struct page **pagep, void **fsdata)
886{
887 int ret;
888
889 ret = block_write_begin(mapping, pos, len, flags, pagep,
890 ext2_get_block);
891 if (ret < 0)
892 ext2_write_failed(mapping, pos + len);
893 return ret;
894}
895
896static int ext2_write_end(struct file *file, struct address_space *mapping,
897 loff_t pos, unsigned len, unsigned copied,
898 struct page *page, void *fsdata)
899{
900 int ret;
901
902 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
903 if (ret < len)
904 ext2_write_failed(mapping, pos + len);
905 return ret;
906}
907
908static int
909ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
910 loff_t pos, unsigned len, unsigned flags,
911 struct page **pagep, void **fsdata)
912{
913 int ret;
914
915 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
916 ext2_get_block);
917 if (ret < 0)
918 ext2_write_failed(mapping, pos + len);
919 return ret;
920}
921
/* writepage for the NOBH (no attached buffer heads) mount option. */
static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}

/* FIBMAP: translate a file-relative block number to a disk block. */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}
932
933static ssize_t
934ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
935{
936 struct file *file = iocb->ki_filp;
937 struct address_space *mapping = file->f_mapping;
938 struct inode *inode = mapping->host;
939 size_t count = iov_iter_count(iter);
940 loff_t offset = iocb->ki_pos;
941 ssize_t ret;
942
943 ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
944 if (ret < 0 && iov_iter_rw(iter) == WRITE)
945 ext2_write_failed(mapping, offset + count);
946 return ret;
947}
948
/* Writeback of a whole mapping via mpage batching. */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}

/* DAX writeback: flush the dirty file ranges on the dax device. */
static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
962
/* Default address-space operations (buffer-head based). */
const struct address_space_operations ext2_aops = {
	.set_page_dirty		= __set_page_dirty_buffers,
	.readpage		= ext2_readpage,
	.readahead		= ext2_readahead,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* Variant for the "nobh" mount option (no attached buffer heads). */
const struct address_space_operations ext2_nobh_aops = {
	.set_page_dirty		= __set_page_dirty_buffers,
	.readpage		= ext2_readpage,
	.readahead		= ext2_readahead,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};

/* Variant for DAX files: reads/writes bypass the page cache entirely. */
static const struct address_space_operations ext2_dax_aops = {
	.writepages		= ext2_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= __set_page_dirty_no_writeback,
	.invalidatepage		= noop_invalidatepage,
};
998
999
1000
1001
1002
1003
1004static inline int all_zeroes(__le32 *p, __le32 *q)
1005{
1006 while (p < q)
1007 if (*p++)
1008 return 0;
1009 return 1;
1010}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the detached top of the doomed subtree
 *
 *	Walks the branch covering the truncation point and locates the
 *	topmost block that lies entirely in the truncated region.  When
 *	that block is not shared with surviving data, its parent pointer is
 *	cleared (under i_meta_lock) and returned via @top so the caller can
 *	free the whole subtree; when the boundary indirect block IS shared,
 *	the returned chain lets the caller free only its tail.  Buffers for
 *	chain entries below the returned one are released here.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Depth of the deepest level where the cut is not at offset 0. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked at it - fine,
	 * it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* Climb while everything before our slot in this block is free. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * The top of the doomed subtree: either detach it here (clear the
	 * parent slot, hand the old value to the caller via @top), or -
	 * when the boundary block is shared with surviving data - widen
	 * the slot range the caller will clear instead.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
/**
 * ext2_free_data - free a list of data blocks
 * @inode:	inode we are dealing with
 * @p:		array of block numbers (little-endian on disk)
 * @q:		points immediately past the end of array
 *
 * Frees every block referenced from the array, zeroing the slots as it
 * goes.  Runs of adjacent block numbers are coalesced into a single
 * ext2_free_blocks() call.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* extend the pending run while blocks are adjacent */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				/* start a new run at this block */
				block_to_free = nr;
				count = 1;
			}
		}
	}
	/* flush the final pending run, if any */
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
/**
 * ext2_free_branches - free an array of branches
 * @inode:	owner
 * @p:		array of block numbers
 * @q:		pointer immediately past the end of array
 * @depth:	depth of the branches to free (0 = entries are data blocks)
 *
 * Recursively frees the subtrees referenced by every nonzero entry in
 * [p, q): at depth > 0 each entry is an indirect block whose contents are
 * freed first, then the block itself; at depth 0 the entries are plain
 * data blocks, handed to ext2_free_data().
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * Report a read failure but keep going: the slot was
			 * already cleared above, so the rest can be freed.
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1179
1180
/*
 * __ext2_truncate_blocks - free all blocks of @inode at or past @offset.
 *
 * Caller must hold dax_sem (asserted below); truncate_mutex is taken here
 * to serialize against block allocation in ext2_get_blocks().
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	/* First block to free: round the byte offset up to a block. */
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* Truncate point lies in the direct blocks. */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	/* Find (and detach) the top of the doomed subtree. */
	partial = ext2_find_shared(inode, n, offsets, chain, &nr);

	/* Kill the detached top of the branch. */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}

	/* Clear the tails of the partially-shared indirect blocks. */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining whole subtrees above the truncate point. */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT2_IND_BLOCK];
		if (nr) {
			i_data[EXT2_IND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 1);
		}
		fallthrough;
	case EXT2_IND_BLOCK:
		nr = i_data[EXT2_DIND_BLOCK];
		if (nr) {
			i_data[EXT2_DIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 2);
		}
		fallthrough;
	case EXT2_DIND_BLOCK:
		nr = i_data[EXT2_TIND_BLOCK];
		if (nr) {
			i_data[EXT2_TIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 3);
		}
		break;
	case EXT2_TIND_BLOCK:
		;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
1270
1271static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1272{
1273 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1274 S_ISLNK(inode->i_mode)))
1275 return;
1276 if (ext2_inode_is_fast_symlink(inode))
1277 return;
1278
1279 dax_sem_down_write(EXT2_I(inode));
1280 __ext2_truncate_blocks(inode, offset);
1281 dax_sem_up_write(EXT2_I(inode));
1282}
1283
/*
 * ext2_setsize - change the file size to @newsize.
 *
 * Zeroes the partial tail page first (DAX, nobh or buffered variant),
 * then updates i_size and drops the surplus block map under dax_sem, and
 * finally stamps the times, syncing immediately for sync inodes.
 * Returns 0 or a negative errno.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Let in-flight direct I/O drain before changing the size. */
	inode_dio_wait(inode);

	if (IS_DAX(inode)) {
		error = iomap_zero_range(inode, newsize,
					 PAGE_ALIGN(newsize) - newsize, NULL,
					 &ext2_iomap_ops);
	} else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
1326
/*
 * ext2_get_inode - read the on-disk inode @ino.
 *
 * Validates the inode number, reads the inode-table block that holds it
 * and returns a pointer into that buffer; *p receives the buffer head,
 * which the caller must brelse().  Returns ERR_PTR(-EINVAL) for a bad
 * inode number, ERR_PTR(-EIO) on descriptor or read failure.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	/* Only the root inode may live below the first regular inode. */
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the byte offset within the group's inode table.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
1369
1370void ext2_set_inode_flags(struct inode *inode)
1371{
1372 unsigned int flags = EXT2_I(inode)->i_flags;
1373
1374 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
1375 S_DIRSYNC | S_DAX);
1376 if (flags & EXT2_SYNC_FL)
1377 inode->i_flags |= S_SYNC;
1378 if (flags & EXT2_APPEND_FL)
1379 inode->i_flags |= S_APPEND;
1380 if (flags & EXT2_IMMUTABLE_FL)
1381 inode->i_flags |= S_IMMUTABLE;
1382 if (flags & EXT2_NOATIME_FL)
1383 inode->i_flags |= S_NOATIME;
1384 if (flags & EXT2_DIRSYNC_FL)
1385 inode->i_flags |= S_DIRSYNC;
1386 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
1387 inode->i_flags |= S_DAX;
1388}
1389
1390void ext2_set_file_ops(struct inode *inode)
1391{
1392 inode->i_op = &ext2_file_inode_operations;
1393 inode->i_fop = &ext2_file_operations;
1394 if (IS_DAX(inode))
1395 inode->i_mapping->a_ops = &ext2_dax_aops;
1396 else if (test_opt(inode->i_sb, NOBH))
1397 inode->i_mapping->a_ops = &ext2_nobh_aops;
1398 else
1399 inode->i_mapping->a_ops = &ext2_aops;
1400}
1401
/*
 * ext2_iget - obtain the in-memory inode for on-disk inode @ino.
 *
 * Returns the cached inode if present; otherwise reads the raw inode,
 * validates it (rejecting deleted or corrupt ones), copies the fields
 * into the VFS inode and installs the operations matching its type.
 * Returns an ERR_PTR on failure.
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh = NULL;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit ids: fold in the high halves */
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * An unlinked inode with zero mode or a nonzero dtime is a deleted
	 * inode that merely survived in the table: do not expose it.
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext2_set_inode_flags(inode);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;

	/* Reject xattr blocks pointing outside the valid data area. */
	if (ei->i_file_acl &&
	    !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
		ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
			   ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	/* i_dir_acl doubles as size_high for regular files. */
	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	if (i_size_read(inode) < 0) {
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * Copy the raw block map verbatim: i_data stays in little-endian
	 * (disk) order; readers apply le32_to_cpu() themselves.
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		ext2_set_file_ops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			/* Fast symlink: target text lives in i_data itself. */
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		/* Device nodes, FIFOs and sockets. */
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
1533
/*
 * Serialize the in-core inode into its on-disk struct ext2_inode and mark
 * the backing buffer dirty; if @do_sync, also wait for the write to
 * complete.  Returns 0 on success or -EIO on lookup/sync failure.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Only write the high 16 bits for live inodes: a deleted
		 * inode (i_dtime set) gets them cleared so that old
		 * kernels re-using the slot don't inherit stale upper
		 * uid/gid bits.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		/* NO_UID32 mount option: squash ids into 16 bits. */
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* Regular files reuse i_dir_acl's slot as i_size_high. */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/*
				 * First large file on this fs: bump the
				 * revision and set the LARGE_FILE ro-compat
				 * feature, then sync the superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device nodes store the dev_t in the block array:
		 * old small encoding in block[0], new in block[1]. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	/* Inode is on disk now; it is no longer "new". */
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1637
1638int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1639{
1640 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1641}
1642
1643int ext2_getattr(struct user_namespace *mnt_userns, const struct path *path,
1644 struct kstat *stat, u32 request_mask, unsigned int query_flags)
1645{
1646 struct inode *inode = d_inode(path->dentry);
1647 struct ext2_inode_info *ei = EXT2_I(inode);
1648 unsigned int flags;
1649
1650 flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
1651 if (flags & EXT2_APPEND_FL)
1652 stat->attributes |= STATX_ATTR_APPEND;
1653 if (flags & EXT2_COMPR_FL)
1654 stat->attributes |= STATX_ATTR_COMPRESSED;
1655 if (flags & EXT2_IMMUTABLE_FL)
1656 stat->attributes |= STATX_ATTR_IMMUTABLE;
1657 if (flags & EXT2_NODUMP_FL)
1658 stat->attributes |= STATX_ATTR_NODUMP;
1659 stat->attributes_mask |= (STATX_ATTR_APPEND |
1660 STATX_ATTR_COMPRESSED |
1661 STATX_ATTR_ENCRYPTED |
1662 STATX_ATTR_IMMUTABLE |
1663 STATX_ATTR_NODUMP);
1664
1665 generic_fillattr(&init_user_ns, inode, stat);
1666 return 0;
1667}
1668
1669int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
1670 struct iattr *iattr)
1671{
1672 struct inode *inode = d_inode(dentry);
1673 int error;
1674
1675 error = setattr_prepare(&init_user_ns, dentry, iattr);
1676 if (error)
1677 return error;
1678
1679 if (is_quota_modification(inode, iattr)) {
1680 error = dquot_initialize(inode);
1681 if (error)
1682 return error;
1683 }
1684 if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
1685 (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
1686 error = dquot_transfer(inode, iattr);
1687 if (error)
1688 return error;
1689 }
1690 if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
1691 error = ext2_setsize(inode, iattr->ia_size);
1692 if (error)
1693 return error;
1694 }
1695 setattr_copy(&init_user_ns, inode, iattr);
1696 if (iattr->ia_valid & ATTR_MODE)
1697 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1698 mark_inode_dirty(inode);
1699
1700 return error;
1701}
1702