1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/time.h>
26#include <linux/highuid.h>
27#include <linux/pagemap.h>
28#include <linux/quotaops.h>
29#include <linux/module.h>
30#include <linux/writeback.h>
31#include <linux/buffer_head.h>
32#include <linux/mpage.h>
33#include <linux/fiemap.h>
34#include <linux/namei.h>
35#include "ext2.h"
36#include "acl.h"
37#include "xip.h"
38
39MODULE_AUTHOR("Remy Card and others");
40MODULE_DESCRIPTION("Second Extended Filesystem");
41MODULE_LICENSE("GPL");
42
43static int __ext2_write_inode(struct inode *inode, int do_sync);
44
45
46
47
48static inline int ext2_inode_is_fast_symlink(struct inode *inode)
49{
50 int ea_blocks = EXT2_I(inode)->i_file_acl ?
51 (inode->i_sb->s_blocksize >> 9) : 0;
52
53 return (S_ISLNK(inode->i_mode) &&
54 inode->i_blocks - ea_blocks == 0);
55}
56
57static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
58
59static void ext2_write_failed(struct address_space *mapping, loff_t to)
60{
61 struct inode *inode = mapping->host;
62
63 if (to > inode->i_size) {
64 truncate_pagecache(inode, to, inode->i_size);
65 ext2_truncate_blocks(inode, inode->i_size);
66 }
67}
68
69
70
71
/*
 * Called by the VFS when an inode is being evicted from memory.
 * If the inode has no remaining links it is fully deleted: its dtime is
 * recorded, its data blocks are released and the on-disk inode is freed.
 * Otherwise only the in-memory state is torn down.
 */
void ext2_evict_inode(struct inode * inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	/* Only delete unlinked, valid inodes; otherwise just drop quota refs. */
	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages(&inode->i_data, 0);

	if (want_delete) {
		/* Set dtime and push the inode to disk BEFORE freeing its
		 * blocks, so fsck sees a consistently dead inode. */
		EXT2_I(inode)->i_dtime = get_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* Release all data blocks still attached to the inode. */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
	}

	invalidate_inode_buffers(inode);
	end_writeback(inode);

	/* Tear down the per-inode reservation window, if any. */
	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete)
		ext2_free_inode(inode);
}
109
/*
 * One step in a chain of indirect-block lookups.
 * @p:   address of the block-number slot (inside the inode's i_data or
 *       inside an indirect block's buffer)
 * @key: cached copy of *p taken when the chain was built; used to detect
 *       concurrent modification (see verify_chain())
 * @bh:  buffer holding the indirect block that contains @p, or NULL when
 *       the slot lives in the inode itself
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
115
116static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
117{
118 p->key = *(p->p = v);
119 p->bh = bh;
120}
121
122static inline int verify_chain(Indirect *from, Indirect *to)
123{
124 while (from <= to && from->key == *from->p)
125 from++;
126 return (from > to);
127}
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
/*
 * ext2_block_to_path - parse a logical block number into its lookup path
 * @inode:    inode in question (path depends on its superblock geometry)
 * @i_block:  logical block number to map
 * @offsets:  filled with up to four indices: the slot in the inode's
 *            i_data array, then one slot per level of indirection
 * @boundary: if non-NULL, set to the number of blocks remaining before
 *            the next indirect-block boundary
 *
 * Returns the path depth (1..4), or 0 if @i_block is out of range
 * (a warning is logged in that case).
 *
 * Note: the chained "i_block -= ..." conditions successively rebase
 * i_block relative to the start of each addressing range.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		/* Direct block: a single slot in the inode itself. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* Singly-indirect range. */
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Doubly-indirect range. */
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triply-indirect range. */
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		/* Distance to the end of the deepest-level block of slots. */
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
/*
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode:   inode in question
 * @depth:   depth of the chain (1 - direct pointer, 2 - indirect, ...)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain:   place to store the traversed (pointer, key, buffer) triples
 * @err:     error return
 *
 * Returns NULL when the whole chain is present (fully mapped block).
 * Otherwise returns a pointer to the last complete element of @chain
 * and sets *@err: 0 - simply a missing block, -EAGAIN - the chain
 * changed under us (caller should retry), -EIO - read failure.
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* The first step lives in the inode itself, so needs no I/O. */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Re-validate under i_meta_lock: a truncate may have raced
		 * with us while the read was in flight. */
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
291{
292 struct ext2_inode_info *ei = EXT2_I(inode);
293 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
294 __le32 *p;
295 ext2_fsblk_t bg_start;
296 ext2_fsblk_t colour;
297
298
299 for (p = ind->p - 1; p >= start; p--)
300 if (*p)
301 return le32_to_cpu(*p);
302
303
304 if (ind->bh)
305 return ind->bh->b_blocknr;
306
307
308
309
310
311 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
312 colour = (current->pid % 16) *
313 (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
314 return bg_start + colour;
315}
316
317
318
319
320
321
322
323
324
325
326static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
327 Indirect *partial)
328{
329 struct ext2_block_alloc_info *block_i;
330
331 block_i = EXT2_I(inode)->i_block_alloc_info;
332
333
334
335
336
337 if (block_i && (block == block_i->last_alloc_logical_block + 1)
338 && (block_i->last_alloc_physical_block != 0)) {
339 return block_i->last_alloc_physical_block + 1;
340 }
341
342 return ext2_find_near(inode, partial);
343}
344
345
346
347
348
349
350
351
352
353
354
355
356
357static int
358ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
359 int blocks_to_boundary)
360{
361 unsigned long count = 0;
362
363
364
365
366
367 if (k > 0) {
368
369 if (blks < blocks_to_boundary + 1)
370 count += blks;
371 else
372 count += blocks_to_boundary + 1;
373 return count;
374 }
375
376 count++;
377 while (count < blks && count <= blocks_to_boundary
378 && le32_to_cpu(*(branch[0].p + count)) == 0) {
379 count++;
380 }
381 return count;
382}
383
384
385
386
387
388
389
390
391
392
393
/*
 * ext2_alloc_blocks - allocate all blocks needed for one mapping request
 * @inode:  owner
 * @goal:   preferred physical block for the allocation
 * @indirect_blks: number of missing indirect blocks in the branch
 * @blks:   number of data blocks the caller wants
 * @new_blocks: filled with one block number per missing indirect block,
 *              followed by the start of the run of data blocks
 * @err:    error return
 *
 * Calls ext2_new_blocks() repeatedly until at least the indirect blocks
 * are covered; whatever remains of the final (possibly multi-block)
 * allocation becomes the run of data blocks.  Returns the number of
 * data blocks allocated (>= 1 on success).  On failure returns 0 with
 * *@err set, and everything allocated so far is freed again.
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Total blocks still needed: the data blocks plus one block per
	 * missing level of indirection.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* Fill the missing indirect slots first. */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Blocks left over after the indirect slots are data blocks. */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* Free whatever we managed to allocate before the failure. */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472static int ext2_alloc_branch(struct inode *inode,
473 int indirect_blks, int *blks, ext2_fsblk_t goal,
474 int *offsets, Indirect *branch)
475{
476 int blocksize = inode->i_sb->s_blocksize;
477 int i, n = 0;
478 int err = 0;
479 struct buffer_head *bh;
480 int num;
481 ext2_fsblk_t new_blocks[4];
482 ext2_fsblk_t current_block;
483
484 num = ext2_alloc_blocks(inode, goal, indirect_blks,
485 *blks, new_blocks, &err);
486 if (err)
487 return err;
488
489 branch[0].key = cpu_to_le32(new_blocks[0]);
490
491
492
493 for (n = 1; n <= indirect_blks; n++) {
494
495
496
497
498
499 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
500 branch[n].bh = bh;
501 lock_buffer(bh);
502 memset(bh->b_data, 0, blocksize);
503 branch[n].p = (__le32 *) bh->b_data + offsets[n];
504 branch[n].key = cpu_to_le32(new_blocks[n]);
505 *branch[n].p = branch[n].key;
506 if ( n == indirect_blks) {
507 current_block = new_blocks[n];
508
509
510
511
512
513 for (i=1; i < num; i++)
514 *(branch[n].p + i) = cpu_to_le32(++current_block);
515 }
516 set_buffer_uptodate(bh);
517 unlock_buffer(bh);
518 mark_buffer_dirty_inode(bh, inode);
519
520
521
522
523 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
524 sync_dirty_buffer(bh);
525 }
526 *blks = num;
527 return err;
528}
529
530
531
532
533
534
535
536
537
538
539
540
541
/*
 * ext2_splice_branch - splice an allocated branch onto the inode's tree
 * @inode: owner
 * @block: logical number of the first block being mapped
 * @where: location of the missing link (where->p is the slot to fill)
 * @num:   number of indirect blocks in the new branch (may be 0)
 * @blks:  number of data blocks being mapped
 *
 * Makes the branch visible by filling the parent slot, records the
 * allocation position for the sequential-allocation heuristic, and
 * dirties the affected metadata.  Called with truncate_mutex held.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* This single store is what actually attaches the branch. */
	*where->p = where->key;

	/*
	 * When the branch added no indirect blocks, the extra data blocks
	 * go into the slots directly following the first one.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the most recently allocated logical & physical block,
	 * to help pick the goal block for the next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/*
	 * Housekeeping: if the splice point was inside an indirect block
	 * (rather than the inode), that buffer is now dirty too.
	 */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
/*
 * ext2_get_blocks - map up to @maxblocks logical blocks at @iblock,
 * allocating new blocks when @create is set.
 *
 * Returns the number of contiguous blocks mapped (> 0) with bh_result
 * mapped accordingly, or a negative errno.  The buffer's "new" flag is
 * set when blocks were freshly allocated; "boundary" marks the last
 * block before an indirect-block boundary.
 *
 * The lockless fast path re-validates the chain with verify_chain();
 * on any race it retries under truncate_mutex, which serializes all
 * tree modification against truncate.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return (err);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* Map more blocks while they are contiguous on disk. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * The indirect block might be removed by
				 * truncate while we were reading it.
				 * Forget what we have and go re-read the
				 * chain under the mutex below.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block went missing while we were reading the
	 * chain (-EAGAIN), or the chain changed after we took the mutex
	 * (another truncate or another get_block), re-grab the chain to
	 * see whether the requested block has been allocated meanwhile.
	 *
	 * Since truncate/get_block are blocked at this point, we hold the
	 * current copy of the chain when we splice the branch in.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* Someone else allocated it: report one block. */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the
	 * block allocation info (reservation window) here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* Number of indirect blocks still missing in the branch. */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Look at the deepest existing level to count the total number
	 * of direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/* Build the branch (still under truncate_mutex). */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (ext2_use_xip(inode->i_sb)) {
		/*
		 * Execute-in-place: the new block must be zeroed before
		 * it becomes visible.
		 */
		err = ext2_clear_xip_target (inode,
			le32_to_cpu(chain[depth-1].key));
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit: release the whole chain. */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	return err;
}
748
749int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
750{
751 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
752 int ret = ext2_get_blocks(inode, iblock, max_blocks,
753 bh_result, create);
754 if (ret > 0) {
755 bh_result->b_size = (ret << inode->i_blkbits);
756 ret = 0;
757 }
758 return ret;
759
760}
761
762int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
763 u64 start, u64 len)
764{
765 return generic_block_fiemap(inode, fieinfo, start, len,
766 ext2_get_block);
767}
768
769static int ext2_writepage(struct page *page, struct writeback_control *wbc)
770{
771 return block_write_full_page(page, ext2_get_block, wbc);
772}
773
774static int ext2_readpage(struct file *file, struct page *page)
775{
776 return mpage_readpage(page, ext2_get_block);
777}
778
779static int
780ext2_readpages(struct file *file, struct address_space *mapping,
781 struct list_head *pages, unsigned nr_pages)
782{
783 return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
784}
785
786static int
787ext2_write_begin(struct file *file, struct address_space *mapping,
788 loff_t pos, unsigned len, unsigned flags,
789 struct page **pagep, void **fsdata)
790{
791 int ret;
792
793 ret = block_write_begin(mapping, pos, len, flags, pagep,
794 ext2_get_block);
795 if (ret < 0)
796 ext2_write_failed(mapping, pos + len);
797 return ret;
798}
799
800static int ext2_write_end(struct file *file, struct address_space *mapping,
801 loff_t pos, unsigned len, unsigned copied,
802 struct page *page, void *fsdata)
803{
804 int ret;
805
806 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
807 if (ret < len)
808 ext2_write_failed(mapping, pos + len);
809 return ret;
810}
811
812static int
813ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
814 loff_t pos, unsigned len, unsigned flags,
815 struct page **pagep, void **fsdata)
816{
817 int ret;
818
819 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
820 ext2_get_block);
821 if (ret < 0)
822 ext2_write_failed(mapping, pos + len);
823 return ret;
824}
825
826static int ext2_nobh_writepage(struct page *page,
827 struct writeback_control *wbc)
828{
829 return nobh_writepage(page, ext2_get_block, wbc);
830}
831
832static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
833{
834 return generic_block_bmap(mapping,block,ext2_get_block);
835}
836
837static ssize_t
838ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
839 loff_t offset, unsigned long nr_segs)
840{
841 struct file *file = iocb->ki_filp;
842 struct address_space *mapping = file->f_mapping;
843 struct inode *inode = mapping->host;
844 ssize_t ret;
845
846 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
847 iov, offset, nr_segs, ext2_get_block, NULL);
848 if (ret < 0 && (rw & WRITE))
849 ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
850 return ret;
851}
852
853static int
854ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
855{
856 return mpage_writepages(mapping, wbc, ext2_get_block);
857}
858
/* Default address-space operations: the buffer-head based I/O path. */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
873
/* Execute-in-place (XIP) operations: data accessed directly, no page cache. */
const struct address_space_operations ext2_aops_xip = {
	.bmap			= ext2_bmap,
	.get_xip_mem		= ext2_get_xip_mem,
};
878
/* "nobh" mount option: avoid attaching buffer_heads to page cache pages. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
892
893
894
895
896
897
898static inline int all_zeroes(__le32 *p, __le32 *q)
899{
900 while (p < q)
901 if (*p++)
902 return 0;
903 return 1;
904}
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the condemned branch
 *
 *	Finds the deepest indirect block shared between the part of the
 *	tree being kept and the part being removed, detaches the condemned
 *	subtree at that point under i_meta_lock (its root block number is
 *	returned in *@top), and returns the last chain element that must
 *	survive.  If the branch grew a continuation meanwhile, *@top stays
 *	0 and everything survives.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Trailing zero offsets mean whole subtrees go away: skip them. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* Walk back past indirect blocks that become entirely empty. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of
	 * our branch should be detached before unlocking. However, if that
	 * rest of branch is all ours and does not grow immediately from
	 * the inode it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	/* Release the buffers below the cut point. */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
988
989
990
991
992
993
994
995
996
997
998
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers (little-endian 32-bit)
 *	@q:	points immediately past the end of the array
 *
 *	Frees every block referenced from the array, zeroing the slots as
 *	it goes.  Runs of physically adjacent blocks are coalesced into a
 *	single ext2_free_blocks() call.  Note: the "free_this" label is
 *	jumped into from the count == 0 case to start a new run.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* Accumulate contiguous blocks into one free call. */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		/* Flush the final pending run. */
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers (little-endian 32-bit)
 *	@q:	pointer immediately past the end of the array
 *	@depth:	depth of the branches to free (0 means data blocks)
 *
 *	Recursively frees all blocks referenced by the branches, one level
 *	of indirection at a time.  At depth 0 this degenerates to
 *	ext2_free_data().
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			/* Detach the subtree before descending into it. */
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report it and skip the subtree;
			 * its blocks are leaked but the tree stays sane.
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			/* The indirect block itself is dead: drop and free it. */
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1073
/*
 * Truncate the indirect-block tree of @inode so that no blocks remain
 * past byte @offset.  Callers must have checked that this inode type
 * may be truncated; truncate_mutex serializes us against concurrent
 * block allocation in ext2_get_blocks().
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	/* First block to be freed: round @offset up to a block boundary. */
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* Cut point is in the direct blocks: free the tail directly. */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the condemned branch (already detached). */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of the indirect blocks on the shared branch. */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees.  Each case falls through
	 * deliberately: deeper trees imply all shallower ones survive. */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT2_IND_BLOCK];
		if (nr) {
			i_data[EXT2_IND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 1);
		}
		/* fall through */
	case EXT2_IND_BLOCK:
		nr = i_data[EXT2_DIND_BLOCK];
		if (nr) {
			i_data[EXT2_DIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 2);
		}
		/* fall through */
	case EXT2_DIND_BLOCK:
		nr = i_data[EXT2_TIND_BLOCK];
		if (nr) {
			i_data[EXT2_TIND_BLOCK] = 0;
			mark_inode_dirty(inode);
			ext2_free_branches(inode, &nr, &nr+1, 3);
		}
		/* fall through */
	case EXT2_TIND_BLOCK:
		;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
1156
1157static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1158{
1159
1160
1161
1162
1163
1164
1165
1166
1167 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1168 S_ISLNK(inode->i_mode)))
1169 return;
1170 if (ext2_inode_is_fast_symlink(inode))
1171 return;
1172 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1173 return;
1174 __ext2_truncate_blocks(inode, offset);
1175}
1176
1177static int ext2_setsize(struct inode *inode, loff_t newsize)
1178{
1179 int error;
1180
1181 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1182 S_ISLNK(inode->i_mode)))
1183 return -EINVAL;
1184 if (ext2_inode_is_fast_symlink(inode))
1185 return -EINVAL;
1186 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1187 return -EPERM;
1188
1189 if (mapping_is_xip(inode->i_mapping))
1190 error = xip_truncate_page(inode->i_mapping, newsize);
1191 else if (test_opt(inode->i_sb, NOBH))
1192 error = nobh_truncate_page(inode->i_mapping,
1193 newsize, ext2_get_block);
1194 else
1195 error = block_truncate_page(inode->i_mapping,
1196 newsize, ext2_get_block);
1197 if (error)
1198 return error;
1199
1200 truncate_setsize(inode, newsize);
1201 __ext2_truncate_blocks(inode, newsize);
1202
1203 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1204 if (inode_needs_sync(inode)) {
1205 sync_mapping_buffers(inode->i_mapping);
1206 sync_inode_metadata(inode, 1);
1207 } else {
1208 mark_inode_dirty(inode);
1209 }
1210
1211 return 0;
1212}
1213
/*
 * ext2_get_inode - map an inode number to its on-disk structure
 * @sb:  superblock
 * @ino: inode number
 * @p:   on success, set to the buffer_head holding the inode-table
 *       block (caller must brelse() it); NULL on failure
 *
 * Returns a pointer into the buffer, or ERR_PTR(-EINVAL) for a bad
 * inode number / ERR_PTR(-EIO) for a descriptor or read failure.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the byte offset within the block group's inode table,
	 * then the block holding it.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	/* NOTE: Eio deliberately falls through; both paths return -EIO. */
	return ERR_PTR(-EIO);
}
1256
1257void ext2_set_inode_flags(struct inode *inode)
1258{
1259 unsigned int flags = EXT2_I(inode)->i_flags;
1260
1261 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
1262 if (flags & EXT2_SYNC_FL)
1263 inode->i_flags |= S_SYNC;
1264 if (flags & EXT2_APPEND_FL)
1265 inode->i_flags |= S_APPEND;
1266 if (flags & EXT2_IMMUTABLE_FL)
1267 inode->i_flags |= S_IMMUTABLE;
1268 if (flags & EXT2_NOATIME_FL)
1269 inode->i_flags |= S_NOATIME;
1270 if (flags & EXT2_DIRSYNC_FL)
1271 inode->i_flags |= S_DIRSYNC;
1272}
1273
1274
1275void ext2_get_inode_flags(struct ext2_inode_info *ei)
1276{
1277 unsigned int flags = ei->vfs_inode.i_flags;
1278
1279 ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
1280 EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
1281 if (flags & S_SYNC)
1282 ei->i_flags |= EXT2_SYNC_FL;
1283 if (flags & S_APPEND)
1284 ei->i_flags |= EXT2_APPEND_FL;
1285 if (flags & S_IMMUTABLE)
1286 ei->i_flags |= EXT2_IMMUTABLE_FL;
1287 if (flags & S_NOATIME)
1288 ei->i_flags |= EXT2_NOATIME_FL;
1289 if (flags & S_DIRSYNC)
1290 ei->i_flags |= EXT2_DIRSYNC_FL;
1291}
1292
/*
 * ext2_iget - obtain the in-core inode for @ino, reading it from disk
 * if it is not already cached.  Returns the unlocked inode or an
 * ERR_PTR on failure (-ENOMEM, -EIO, -EINVAL, -ESTALE).
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* cache hit: already initialized */

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit ids: low 16 bits here, high 16 stored separately. */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * We now have enough fields to check whether the inode was active.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;
	if (S_ISREG(inode->i_mode))
		/* Regular files reuse i_dir_acl as the high 32 bits of size. */
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers.
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	/* Wire up the operations vectors according to the inode type. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (ext2_use_xip(inode->i_sb)) {
			inode->i_mapping->a_ops = &ext2_aops_xip;
			inode->i_fop = &ext2_xip_file_operations;
		} else if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		/* Device nodes, FIFOs, sockets: device number is encoded
		 * old-style in i_block[0] or new-style in i_block[1]. */
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
1415
/*
 * Write the in-core inode @inode back to its on-disk location.
 * If @do_sync is set, wait for the buffer write to complete and report
 * I/O errors.  Returns 0 or -EIO.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = inode->i_uid;
	gid_t gid = inode->i_gid;
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* For regular files i_dir_acl holds the high 32 size bits. */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/* If this is the first large file created,
				 * add a flag to the superblock. */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_write_super(sb);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device nodes: old or new device-number encoding. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1520
1521int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1522{
1523 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1524}
1525
/*
 * ext2_setattr - VFS ->setattr: apply the attribute changes in @iattr
 * to the inode behind @dentry.  Handles quota transfer on uid/gid
 * change, size changes via ext2_setsize(), and ACL update on mode
 * change.  Returns 0 or a negative errno.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr))
		dquot_initialize(inode);
	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
		/* Move the inode's quota charges to the new owner first. */
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = ext2_acl_chmod(inode);
	mark_inode_dirty(inode);

	return error;
}
1555