1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/time.h>
26#include <linux/highuid.h>
27#include <linux/pagemap.h>
28#include <linux/dax.h>
29#include <linux/blkdev.h>
30#include <linux/quotaops.h>
31#include <linux/writeback.h>
32#include <linux/buffer_head.h>
33#include <linux/mpage.h>
34#include <linux/fiemap.h>
35#include <linux/namei.h>
36#include <linux/uio.h>
37#include "ext2.h"
38#include "acl.h"
39#include "xattr.h"
40
41static int __ext2_write_inode(struct inode *inode, int do_sync);
42
43
44
45
46static inline int ext2_inode_is_fast_symlink(struct inode *inode)
47{
48 int ea_blocks = EXT2_I(inode)->i_file_acl ?
49 (inode->i_sb->s_blocksize >> 9) : 0;
50
51 return (S_ISLNK(inode->i_mode) &&
52 inode->i_blocks - ea_blocks == 0);
53}
54
55static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
56
57static void ext2_write_failed(struct address_space *mapping, loff_t to)
58{
59 struct inode *inode = mapping->host;
60
61 if (to > inode->i_size) {
62 truncate_pagecache(inode, inode->i_size);
63 ext2_truncate_blocks(inode, inode->i_size);
64 }
65}
66
67
68
69
70void ext2_evict_inode(struct inode * inode)
71{
72 struct ext2_block_alloc_info *rsv;
73 int want_delete = 0;
74
75 if (!inode->i_nlink && !is_bad_inode(inode)) {
76 want_delete = 1;
77 dquot_initialize(inode);
78 } else {
79 dquot_drop(inode);
80 }
81
82 truncate_inode_pages_final(&inode->i_data);
83
84 if (want_delete) {
85 sb_start_intwrite(inode->i_sb);
86
87 EXT2_I(inode)->i_dtime = get_seconds();
88 mark_inode_dirty(inode);
89 __ext2_write_inode(inode, inode_needs_sync(inode));
90
91 inode->i_size = 0;
92 if (inode->i_blocks)
93 ext2_truncate_blocks(inode, 0);
94 ext2_xattr_delete_inode(inode);
95 }
96
97 invalidate_inode_buffers(inode);
98 clear_inode(inode);
99
100 ext2_discard_reservation(inode);
101 rsv = EXT2_I(inode)->i_block_alloc_info;
102 EXT2_I(inode)->i_block_alloc_info = NULL;
103 if (unlikely(rsv))
104 kfree(rsv);
105
106 if (want_delete) {
107 ext2_free_inode(inode);
108 sb_end_intwrite(inode->i_sb);
109 }
110}
111
/*
 * One step of a path through the indirect-block tree:
 *   @p   - address of the block-number slot (in the inode's i_data[] or
 *          inside an indirect block's buffer),
 *   @key - the value read from that slot, cached for later verification,
 *   @bh  - buffer the slot lives in (NULL for slots in the inode itself).
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
117
118static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
119{
120 p->key = *(p->p = v);
121 p->bh = bh;
122}
123
124static inline int verify_chain(Indirect *from, Indirect *to)
125{
126 while (from <= to && from->key == *from->p)
127 from++;
128 return (from > to);
129}
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set to the distance to the end of the last fully-indexed
 *		chunk (see below); may be NULL
 *
 *	Splits a logical block number into its path through the direct,
 *	indirect, double-indirect and triple-indirect arrays, filling
 *	@offsets with the slot index at each level.  Returns the depth of
 *	the path (1..4) or 0 if @i_block is out of range (a warning is
 *	logged in that case).
 *
 *	Note the destructive updates: each `else if' subtracts the range
 *	already ruled out from i_block before comparing, so by the end
 *	i_block is the offset within its final indirect block — which is
 *	exactly what the @boundary computation below relies on.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	/* distance from i_block to the end of its indirect block */
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Walks the branch described by @offsets, filling each chain element
 *	with the slot address, its cached value and the pinned buffer it
 *	lives in.  Returns NULL when the full depth was resolved; otherwise
 *	a pointer to the last filled (incomplete) element, with:
 *	  - key == 0, *err == 0: a hole — allocation needed to go deeper,
 *	  - *err == -EAGAIN: the chain changed while we read it; retry,
 *	  - *err == -EIO: an indirect block could not be read.
 *	The caller must brelse() the buffers of any partially-built chain.
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: verify the path under i_meta_lock before extending */
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	Returns a preferred place (goal) for block allocation, in order of
 *	preference:
 *	  + the closest preceding mapped block in the same pointer array,
 *	  + the indirect block the pointer array lives in,
 *	  + the start of the inode's block group, offset by a per-PID
 *	    "colour" to spread concurrent allocators apart.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it
	 * into the same block group then — with a PID-based colour so
	 * different processes don't all pile onto the same blocks.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
318
319
320
321
322
323
324
325
326
327
328static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
329 Indirect *partial)
330{
331 struct ext2_block_alloc_info *block_i;
332
333 block_i = EXT2_I(inode)->i_block_alloc_info;
334
335
336
337
338
339 if (block_i && (block == block_i->last_alloc_logical_block + 1)
340 && (block_i->last_alloc_physical_block != 0)) {
341 return block_i->last_alloc_physical_block + 1;
342 }
343
344 return ext2_find_near(inode, partial);
345}
346
347
348
349
350
351
352
353
354
355
356
357
358
359static int
360ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
361 int blocks_to_boundary)
362{
363 unsigned long count = 0;
364
365
366
367
368
369 if (k > 0) {
370
371 if (blks < blocks_to_boundary + 1)
372 count += blks;
373 else
374 count += blocks_to_boundary + 1;
375 return count;
376 }
377
378 count++;
379 while (count < blks && count <= blocks_to_boundary
380 && le32_to_cpu(*(branch[0].p + count)) == 0) {
381 count++;
382 }
383 return count;
384}
385
386
387
388
389
390
391
392
393
394
395
/**
 *	ext2_alloc_blocks - allocate the blocks needed for a branch
 *	@inode: owner
 *	@goal: preferred physical location
 *	@indirect_blks: number of missing indirect blocks in the branch
 *	@blks: number of data blocks the caller would like
 *	@new_blocks: filled with the @indirect_blks metadata block numbers
 *		followed by the first block of the data extent
 *	@err: error value
 *
 *	Calls ext2_new_blocks() repeatedly until all indirect blocks plus
 *	at least one data block are covered.  Returns the number of
 *	contiguous data blocks obtained (the last extent returned by the
 *	allocator, minus whatever was consumed for metadata), or 0 with
 *	*err set.  On failure everything allocated so far is freed again.
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * The minimum we must allocate is every missing indirect block
	 * plus the first data block; anything beyond that is best-effort.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* consume the front of the extent for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* count > 0 here means we got at least one data block: done.
		 * Otherwise the whole extent went to metadata — loop again. */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of data blocks allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* free whatever indirect blocks we had already claimed */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks
 *	@inode: owner
 *	@indirect_blks: number of missing indirect blocks to allocate
 *	@blks: in: wanted number of data blocks; out: number actually got
 *	@goal: preferred physical location
 *	@offsets: slot indices, one per level, for the new pointers
 *	@branch: place to store the chain in
 *
 *	Allocates the blocks, zeroes each new indirect block, chains them
 *	together (the last one pointing at the data extent) and writes
 *	them out.  The link from the *parent* into this new branch is NOT
 *	made here — that is ext2_splice_branch()'s job, so a failure
 *	leaves the on-disk tree untouched.  Returns 0 on success or
 *	-errno, in which case everything allocated here is freed again.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain: make the last new indirect block
			 * point at all of the newly allocated data blocks.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* Directories with DIRSYNC need the metadata on disk now */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	/* forget the dirty buffers created so far (n is the failed level) */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	/* after the loop i == indirect_blks: frees the data extent itself */
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
543
544
545
546
547
548
549
550
551
552
553
554
555
/**
 *	ext2_splice_branch - splice the allocated branch onto the inode
 *	@inode: owner
 *	@block: (logical) number of the first block we are adding
 *	@where: location of the missing link
 *	@num: number of indirect blocks we are adding
 *	@blks: number of direct blocks we are adding
 *
 *	Fills in the missing link (making the new branch visible in the
 *	tree) and does the remaining housekeeping: records the allocation
 *	in i_block_alloc_info for sequential-goal hints, dirties the
 *	parent buffer/inode and updates i_ctime.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* That's it — this store makes the branch reachable */
	*where->p = where->key;

	/*
	 * If no new indirect block was needed (num == 0), the extra
	 * direct blocks are contiguous after the first: fill their slots
	 * in the existing pointer array too.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the most recently allocated logical & physical block,
	 * to help pick the goal for the next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* had we spliced it onto an indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
/*
 * Map @iblock (and up to @maxblocks contiguous following blocks) of
 * @inode into @bh_result.  When @create is set, missing blocks are
 * allocated, as one extent where possible.  Returns the number of
 * blocks mapped (> 0) or a negative error.  The allocation path is
 * serialised against truncate by ei->truncate_mutex; lookups are
 * lockless and retried on -EAGAIN when the chain changes underneath.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return (err);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks while they are physically contiguous */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the chain raced with a truncate or another allocation while
	 * we read it (-EAGAIN, or verify_chain() now fails), re-read it
	 * under the mutex: the wanted block may have been allocated by
	 * someone else in the meantime.  Holding truncate_mutex from here
	 * on guarantees we splice into a current copy of the chain.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the
	 * per-inode block allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next, look at the pointer array to count how many direct
	 * blocks we can allocate for this branch in one go.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (IS_DAX(inode)) {
		/*
		 * block must be initialised before we put it in the tree
		 * so that it's not found by another thread before it's
		 * initialised
		 */
		err = sb_issue_zeroout(inode->i_sb,
				le32_to_cpu(chain[depth-1].key), count,
				GFP_NOFS);
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	} else
		set_buffer_new(bh_result);

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	return err;
}
767
768int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
769{
770 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
771 int ret = ext2_get_blocks(inode, iblock, max_blocks,
772 bh_result, create);
773 if (ret > 0) {
774 bh_result->b_size = (ret << inode->i_blkbits);
775 ret = 0;
776 }
777 return ret;
778
779}
780
/* fiemap ioctl handler: delegate to the generic block-based helper. */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}
787
/* ->writepage: write one page using the buffer-head based helper. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}
792
/* ->readpage: read one page via the multi-page (mpage) helper. */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}
797
/* ->readpages: readahead of several pages via the mpage helper. */
static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}
804
805static int
806ext2_write_begin(struct file *file, struct address_space *mapping,
807 loff_t pos, unsigned len, unsigned flags,
808 struct page **pagep, void **fsdata)
809{
810 int ret;
811
812 ret = block_write_begin(mapping, pos, len, flags, pagep,
813 ext2_get_block);
814 if (ret < 0)
815 ext2_write_failed(mapping, pos + len);
816 return ret;
817}
818
819static int ext2_write_end(struct file *file, struct address_space *mapping,
820 loff_t pos, unsigned len, unsigned copied,
821 struct page *page, void *fsdata)
822{
823 int ret;
824
825 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
826 if (ret < len)
827 ext2_write_failed(mapping, pos + len);
828 return ret;
829}
830
831static int
832ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
833 loff_t pos, unsigned len, unsigned flags,
834 struct page **pagep, void **fsdata)
835{
836 int ret;
837
838 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
839 ext2_get_block);
840 if (ret < 0)
841 ext2_write_failed(mapping, pos + len);
842 return ret;
843}
844
/* ->writepage for the "nobh" mount option. */
static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}
850
/* ->bmap: logical-to-physical block mapping for FIBMAP and friends. */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}
855
/*
 * ->direct_IO: DAX path for persistent-memory backed inodes, generic
 * blockdev direct I/O otherwise.  On a failed write, trim any blocks
 * that were instantiated beyond i_size.
 */
static ssize_t
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (IS_DAX(inode))
		ret = dax_do_io(iocb, inode, iter, ext2_get_block, NULL,
				DIO_LOCKING);
	else
		ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
	return ret;
}
875
/*
 * ->writepages: DAX mappings flush dirty CPU caches to the backing
 * device; everything else goes through the mpage writeback helper.
 */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
#ifdef CONFIG_FS_DAX
	if (dax_mapping(mapping)) {
		return dax_writeback_mapping_range(mapping,
						   mapping->host->i_sb->s_bdev,
						   wbc);
	}
#endif

	return mpage_writepages(mapping, wbc, ext2_get_block);
}
889
/* Default address-space operations (buffer-head based I/O). */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
903
/* Address-space operations used with the "nobh" mount option. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
916
917
918
919
920
921
922static inline int all_zeroes(__le32 *p, __le32 *q)
923{
924 while (p < q)
925 if (*p++)
926 return 0;
927 return 1;
928}
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (from ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to put the top of the removed branch
 *
 *	Finds the boundary between the part of the block tree that must
 *	survive the truncate and the part to be removed.  The topmost
 *	removable pointer is detached under i_meta_lock (its value moved
 *	into *@top and the slot zeroed), so concurrent readers see a
 *	consistent tree.  Returns a pointer into @chain at the level
 *	where partial clearing must start; buffers above that level have
 *	already been released.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* ignore trailing zero-offset levels: whole subtrees go away there */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	/* release buffers below the level where clearing starts */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers (little-endian on disk)
 *	@q:	points immediately past the end of array
 *
 *	Frees all blocks referenced from the array, zeroing each slot as
 *	it goes.  Consecutive block numbers are batched into a single
 *	ext2_free_blocks() call.  Note the `goto free_this' jumping into
 *	the else branch: it deliberately shares the "start a new run"
 *	code between the empty-run and broken-run cases.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	Recursively frees the subtrees rooted at each non-zero entry in
 *	[@p, @q), zeroing the slots.  At depth 0 the entries are plain
 *	data blocks and ext2_free_data() handles them.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			/* the indirect block itself is freed last */
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1097
1098
/*
 * Truncate @inode's block tree so that only blocks covering [0, offset)
 * (rounded up to a block boundary) remain.  Caller must hold dax_sem
 * for writing (asserted below for DAX builds); allocation is locked out
 * via ei->truncate_mutex for the duration.
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* boundary is in the direct area: free trailing slots */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the branch that was detached for us */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees; each case falls through
	 * to kill the deeper trees as well. */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
			/* fall through */
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
			/* fall through */
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
			/* fall through */
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
1185
/*
 * Truncate blocks past @offset, taking dax_sem to exclude DAX faults.
 * Silently does nothing for inode types whose i_data[] does not hold a
 * block tree (device nodes, fast symlinks) and for append-only or
 * immutable inodes.
 */
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	/* only regular files, directories and symlinks have block trees */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	/* fast symlinks keep their target in i_data[] - nothing to free */
	if (ext2_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	dax_sem_down_write(EXT2_I(inode));
	__ext2_truncate_blocks(inode, offset);
	dax_sem_up_write(EXT2_I(inode));
}
1208
/*
 * Change the size of @inode to @newsize: zero the partial tail block,
 * update i_size and free blocks past the new end.  Returns 0 or a
 * negative error.  Refuses inode types without a block tree and
 * append-only/immutable inodes.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* wait for in-flight direct I/O before changing the size */
	inode_dio_wait(inode);

	/* zero the tail of the (possibly partial) last block */
	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, ext2_get_block);
	else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	/* update i_size/page cache and drop the now-unused blocks */
	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
1249
/*
 * Locate the on-disk inode @ino: read the block of the inode table that
 * contains it and return a pointer to the raw struct ext2_inode inside
 * the buffer.  On success *@p holds the buffer (caller must brelse());
 * on failure *@p is NULL and an ERR_PTR (-EINVAL for a bad inode
 * number, -EIO for a missing group descriptor or read error) is
 * returned.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
1292
/* Propagate flags from the ext2-specific i_flags to inode->i_flags. */
void ext2_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT2_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
				S_DIRSYNC | S_DAX);
	if (flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT2_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT2_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT2_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT2_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	/* DAX comes from the mount option, and only for regular files */
	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
		inode->i_flags |= S_DAX;
}
1312
1313
/* Propagate flags from inode->i_flags to the ext2-specific i_flags. */
void ext2_get_inode_flags(struct ext2_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT2_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT2_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT2_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT2_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT2_DIRSYNC_FL;
}
1331
/*
 * Read inode @ino from disk and build the in-core inode: decode the
 * raw fields, pick the right inode/file/address-space operations for
 * its type, and return the unlocked inode.  Returns an ERR_PTR on
 * failure (-ESTALE for deleted/free on-disk inodes).
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
 		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	/* the high 16 bits of uid/gid exist unless NO_UID32 is set */
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;
	/* for regular files i_dir_acl holds the high 32 bits of i_size */
	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		/* old (16-bit) dev numbers live in i_block[0], new in [1] */
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else 
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;
	
bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
1457
/*
 * Write the in-core inode back into its on-disk slot.  With @do_sync
 * the buffer is written synchronously and an I/O failure is reported
 * (-EIO); otherwise the buffer is just marked dirty.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* regular files reuse i_dir_acl for the high size bits */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
			       /* If this is the first large file
				* created, add a flag to the superblock.
				*/
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_write_super(sb);
			}
		}
	}
	
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* device numbers: old 16-bit encoding in i_block[0],
		 * new encoding in i_block[1] */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1562
1563int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1564{
1565 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1566}
1567
/*
 * ->setattr: validate the change, transfer quota on uid/gid changes,
 * perform truncation on size changes, then copy the remaining
 * attributes into the inode (chmod additionally updates the POSIX
 * ACL).  Returns 0 or a negative error.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);

	return error;
}
1600