1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/smp_lock.h>
26#include <linux/time.h>
27#include <linux/highuid.h>
28#include <linux/pagemap.h>
29#include <linux/quotaops.h>
30#include <linux/module.h>
31#include <linux/writeback.h>
32#include <linux/buffer_head.h>
33#include <linux/mpage.h>
34#include <linux/fiemap.h>
35#include <linux/namei.h>
36#include "ext2.h"
37#include "acl.h"
38#include "xip.h"
39
/* Module metadata reported by modinfo when ext2 is built as a module. */
MODULE_AUTHOR("Remy Card and others");
MODULE_DESCRIPTION("Second Extended Filesystem");
MODULE_LICENSE("GPL");
43
44
45
46
47static inline int ext2_inode_is_fast_symlink(struct inode *inode)
48{
49 int ea_blocks = EXT2_I(inode)->i_file_acl ?
50 (inode->i_sb->s_blocksize >> 9) : 0;
51
52 return (S_ISLNK(inode->i_mode) &&
53 inode->i_blocks - ea_blocks == 0);
54}
55
56
57
58
/*
 * Called by the VFS when the last reference to an unlinked inode goes
 * away.  Records the deletion time, writes the inode out, truncates
 * any remaining data blocks and finally releases the on-disk inode.
 * Bad inodes are simply cleared without touching the disk.
 */
void ext2_delete_inode (struct inode * inode)
{
	/* Drop all cached pages first; no one may dirty them after this. */
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;
	EXT2_I(inode)->i_dtime = get_seconds();
	mark_inode_dirty(inode);
	/* Flush the dtime to disk, synchronously if the inode requires it. */
	ext2_write_inode(inode, inode_needs_sync(inode));

	inode->i_size = 0;
	if (inode->i_blocks)
		ext2_truncate (inode);
	/* ext2_free_inode() clears the in-core inode as part of freeing. */
	ext2_free_inode (inode);

	return;
no_delete:
	clear_inode(inode);
}
78
/*
 * One step in a walk down the indirect-block tree:
 * @p:   address of the block pointer within the inode or buffer
 * @key: the value of *p as sampled at walk time (used to detect races)
 * @bh:  buffer holding *p, or NULL when *p lives in the inode itself
 */
typedef struct {
	__le32 *p;
	__le32 key;
	struct buffer_head *bh;
} Indirect;
84
85static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
86{
87 p->key = *(p->p = v);
88 p->bh = bh;
89}
90
91static inline int verify_chain(Indirect *from, Indirect *to)
92{
93 while (from <= to && from->key == *from->p)
94 from++;
95 return (from > to);
96}
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
/*
 * ext2_block_to_path - parse a logical block number into chain offsets
 * @inode: inode in question
 * @i_block: block number relative to the start of the file
 * @offsets: filled with the array indices for each tree level
 * @boundary: if non-NULL, set to the number of blocks remaining before
 *	the end of the final level's pointer block
 *
 * Returns the depth of the chain (1 for direct, up to 4 for triple
 * indirect) or 0 on a bad block number.  Note the `i_block -=`
 * side effects below: each test shifts i_block into the range covered
 * by the next tree level before comparing.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_warning (inode->i_sb, "ext2_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		/* Direct block: single step indexed straight into i_data[]. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big");
	}
	if (boundary)
		/* Distance to the end of the last pointer block. */
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
/*
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1..4), as returned by ext2_block_to_path()
 * @offsets: array of offsets to walk, same source
 * @chain: filled in with one Indirect per completed step
 * @err: out-parameter for an error code
 *
 * Returns NULL when the whole chain was walked successfully (the data
 * block number is then chain[depth-1].key), or a pointer to the last
 * completed step on a hole (*err == 0), read failure (-EIO) or a race
 * with truncate (-EAGAIN).  i_meta_lock is taken around each step so
 * that the sampled pointer and the chain verification are consistent.
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* First step lives in the inode itself; no buffer needed. */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
/*
 * ext2_find_near - find a physical block with good locality
 * @inode: owner
 * @ind: last completed step in the indirect chain lookup
 *
 * Heuristic goal selection, in order of preference: the nearest
 * preceding non-null pointer in the same pointer block (or i_data[]),
 * the indirect block itself, and finally a PID-derived "colour" inside
 * the inode's block group to keep concurrent allocators apart.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find the previously allocated block closest to us. */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No data blocks yet: stay next to the indirect block itself. */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Nothing allocated at all: start from the inode's block group,
	 * offset by a per-process colour (1/16 of the group per slot).
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
283
284
285
286
287
288
289
290
291
292
293static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
294 Indirect *partial)
295{
296 struct ext2_block_alloc_info *block_i;
297
298 block_i = EXT2_I(inode)->i_block_alloc_info;
299
300
301
302
303
304 if (block_i && (block == block_i->last_alloc_logical_block + 1)
305 && (block_i->last_alloc_physical_block != 0)) {
306 return block_i->last_alloc_physical_block + 1;
307 }
308
309 return ext2_find_near(inode, partial);
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324static int
325ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
326 int blocks_to_boundary)
327{
328 unsigned long count = 0;
329
330
331
332
333
334 if (k > 0) {
335
336 if (blks < blocks_to_boundary + 1)
337 count += blks;
338 else
339 count += blocks_to_boundary + 1;
340 return count;
341 }
342
343 count++;
344 while (count < blks && count <= blocks_to_boundary
345 && le32_to_cpu(*(branch[0].p + count)) == 0) {
346 count++;
347 }
348 return count;
349}
350
351
352
353
354
355
356
357
358
359
360
/*
 * ext2_alloc_blocks - allocate metadata blocks plus a data run
 * @inode: owner
 * @goal: preferred physical block
 * @indirect_blks: number of missing indirect (metadata) blocks
 * @blks: number of data blocks desired
 * @new_blocks: on success, the first @indirect_blks entries hold the
 *	metadata block numbers and the next entry holds the first block
 *	of the contiguous data run
 * @err: out-parameter for the error code
 *
 * Calls ext2_new_blocks() repeatedly until all metadata blocks plus at
 * least one data block are in hand.  Returns the length of the data
 * run (>= 1), or 0 with *err set; partial allocations are freed on
 * failure.
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * We need @blks data blocks and @indirect_blks metadata blocks.
	 * Each ext2_new_blocks() call may return fewer than asked for,
	 * so loop until the metadata requirement is fully satisfied.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* Hand out metadata blocks first, one at a time. */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Anything left over is the start of the data run. */
		if (count > 0)
			break;
	}

	/* First block of the contiguous data extent. */
	new_blocks[index] = current_block;

	/* Data blocks obtained in this final allocation. */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* Roll back any metadata blocks we already claimed. */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	return ret;
}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
/*
 * ext2_alloc_branch - allocate and link together a branch of the tree
 * @inode: owner
 * @indirect_blks: number of indirect blocks to allocate
 * @blks: in: desired data blocks; out: data blocks actually allocated
 * @goal: preferred physical block
 * @offsets: offsets within each new indirect block (from block_to_path)
 * @branch: filled in with the new chain; branch[0].key is the block
 *	number to be spliced into the existing tree by the caller
 *
 * The new indirect blocks are fully initialised and written out before
 * the caller makes the branch visible, so a crash mid-way leaks blocks
 * but never corrupts the tree.  Returns 0 or a negative error.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * Metadata blocks and *one* data block are allocated; link each
	 * new indirect block to its child before anything is visible.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for the parent block, zero it out and
		 * set the pointer to the next new one in it.
		 *
		 * NOTE(review): sb_getblk() can return NULL on failure;
		 * that is not checked here — worth confirming against
		 * later kernels, which added an error path for it.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * The final indirect block points at the whole
			 * contiguous data run, not just its first block.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/*
		 * We used to sync bh here if IS_SYNC(inode).  But we now
		 * rely on generic_write_sync() and the dirsync handling
		 * below for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;
}
494
495
496
497
498
499
500
501
502
503
504
505
506
/*
 * ext2_splice_branch - splice a freshly allocated branch onto the tree
 * @inode: owner
 * @block: logical number of the first data block in the file
 * @where: location of the missing link (from ext2_find_shared/get_branch)
 * @num: number of indirect blocks in the new branch
 * @blks: number of data blocks just allocated
 *
 * Makes the new branch visible by storing its head into *where->p.
 * Caller holds truncate_mutex, which serialises this against truncate.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* That's it: the branch becomes reachable here. */
	*where->p = where->key;

	/*
	 * If the branch needed no new indirect blocks (num == 0), the
	 * extra data blocks of the run must be linked directly into the
	 * existing final-level pointer block.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember where this allocation ended so a subsequent
	 * sequential append can continue right after it
	 * (see ext2_find_goal()).
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* Had we spliced into an indirect block, mark it dirty too. */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
/*
 * ext2_get_blocks - map (and optionally allocate) a run of file blocks
 * @inode: inode in question
 * @iblock: starting logical block
 * @maxblocks: maximum number of blocks the caller wants mapped
 * @bh_result: mapped on success; buffer_new set when blocks were
 *	freshly allocated
 * @create: when non-zero, allocate blocks for holes
 *
 * Returns the number of contiguous blocks mapped (> 0), 0 for an
 * unmapped hole with !create, or a negative error.  Allocation is
 * serialised against truncate via truncate_mutex; lockless lookups are
 * revalidated with verify_chain() and retried on -EAGAIN.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return (err);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - the whole chain exists, no allocation needed. */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* Extend the mapping over physically contiguous blocks. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* The number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Build the new branch; on success it is fully initialised on
	 * disk but not yet reachable from the tree.
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (ext2_use_xip(inode->i_sb)) {
		/*
		 * XIP (execute in place) must zero the new block before
		 * it becomes user-visible.
		 */
		err = ext2_clear_xip_target (inode,
			le32_to_cpu(chain[depth-1].key));
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up the whole chain; all buffers up to depth-1 are held. */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	return err;
}
713
714int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
715{
716 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
717 int ret = ext2_get_blocks(inode, iblock, max_blocks,
718 bh_result, create);
719 if (ret > 0) {
720 bh_result->b_size = (ret << inode->i_blkbits);
721 ret = 0;
722 }
723 return ret;
724
725}
726
/* FIEMAP ioctl support: delegate to the generic block-mapping walker. */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
}
733
/* Write one dirty page via the buffer-head based generic path. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}
738
/* Read one page using the multi-page BIO helper. */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}
743
/* Readahead: read a batch of pages with a single merged BIO if possible. */
static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}
750
/*
 * Core of write_begin, shared with ext2's directory code: prepare
 * (and allocate if needed) the page for a write of @len at @pos.
 */
int __ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext2_get_block);
}
758
/*
 * write_begin for regular (buffer-head) files.  *pagep must be NULL on
 * entry so block_write_begin() grabs the page itself.
 */
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return __ext2_write_begin(file, mapping, pos, len, flags, pagep,fsdata);
}
767
/*
 * write_begin for the "nobh" mount option: avoid attaching
 * buffer_heads to the data pages.
 */
static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	/*
	 * Dir-in-pagecache still uses ext2_write_begin. Would have to rework
	 * directory handling code to pass around offsets rather than struct
	 * pages in order to make this work easily.
	 */
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext2_get_block);
}
781
/* writepage for the "nobh" mount option. */
static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, ext2_get_block, wbc);
}
787
/* Legacy FIBMAP support: logical-to-physical block translation. */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}
792
793static ssize_t
794ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
795 loff_t offset, unsigned long nr_segs)
796{
797 struct file *file = iocb->ki_filp;
798 struct inode *inode = file->f_mapping->host;
799
800 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
801 offset, nr_segs, ext2_get_block, NULL);
802}
803
/* Flush a range of dirty pages, merging contiguous blocks into BIOs. */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}
809
/* Default address_space operations (buffer-head based). */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext2_write_begin,
	.write_end		= generic_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
824
/* Address_space operations for execute-in-place (XIP) mounts. */
const struct address_space_operations ext2_aops_xip = {
	.bmap			= ext2_bmap,
	.get_xip_mem		= ext2_get_xip_mem,
};
829
/* Address_space operations for the "nobh" mount option. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
843
844
845
846
847
848
849static inline int all_zeroes(__le32 *p, __le32 *q)
850{
851 while (p < q)
852 if (*p++)
853 return 0;
854 return 1;
855}
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
/*
 * ext2_find_shared - find the indirect blocks for partial truncation
 * @inode: inode in question
 * @depth: depth of @chain
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the pointers to partial indirect blocks
 * @top: set to the first block that should be completely freed, or 0
 *
 * Walks the chain to the boundary of the data that survives the
 * truncate and detaches (under i_meta_lock) the subtree that must go,
 * so that concurrent readers racing via ext2_get_branch() either see
 * the old tree or nothing.  Returns the last "shared" step whose
 * buffer must stay dirty, with buffers on the path held.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Skip trailing zero offsets: nothing is shared at those levels. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* Walk back over fully-empty indirect blocks; they all go. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	/* Release buffers below the shared boundary. */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
939
940
941
942
943
944
945
946
947
948
949
/*
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers (pointers into inode or indirect block)
 * @q: points immediately past the end of array
 *
 * Zeroes each pointer and frees the blocks, batching physically
 * contiguous runs into single ext2_free_blocks() calls.  Note the
 * goto into the else-arm below: it seeds the very first run.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				/* Run broken: flush it and start a new one. */
				mark_inode_dirty(inode);
				ext2_free_blocks (inode, block_to_free, count);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	if (count > 0) {
		/* Flush the final pending run. */
		mark_inode_dirty(inode);
		ext2_free_blocks (inode, block_to_free, count);
	}
}
978
979
980
981
982
983
984
985
986
987
988
989
/*
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free (0 == these are data blocks)
 *
 * Recursively frees whole subtrees: each non-zero pointer is cleared,
 * the indirect block it names is read, its children are freed, and
 * finally the block itself is released.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			/* The block's contents are dead; don't write them back. */
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1024
/*
 * ext2_truncate - truncate the inode's data to i_size
 *
 * Zeroes the tail of the last partial block, then walks the block tree
 * freeing everything past the new end of file: first any partial
 * branch, then the whole indirect trees that lie entirely beyond it.
 * truncate_mutex serialises this against block allocation.
 */
void ext2_truncate(struct inode *inode)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	/* Only regular files, directories and symlinks carry block trees. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	/* Fast symlinks keep their target in i_data[]: nothing to free. */
	if (ext2_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = inode->i_sb->s_blocksize;
	/* First block that must be freed (round i_size up). */
	iblock = (inode->i_size + blocksize-1)
					>> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

	/* Zero the tail of the (possibly partial) last remaining block. */
	if (mapping_is_xip(inode->i_mapping))
		xip_truncate_page(inode->i_mapping, inode->i_size);
	else if (test_opt(inode->i_sb, NOBH))
		nobh_truncate_page(inode->i_mapping,
				inode->i_size, ext2_get_block);
	else
		block_truncate_page(inode->i_mapping,
				inode->i_size, ext2_get_block);

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* New EOF is inside the direct blocks; free the rest. */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees; cases fall through. */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
			/* fall through */
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
			/* fall through */
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
			/* fall through */
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		ext2_sync_inode (inode);
	} else {
		mark_inode_dirty(inode);
	}
}
1133
/*
 * ext2_get_inode - map an inode number to its on-disk struct
 * @sb: superblock
 * @ino: inode number (1-based)
 * @p: set to the buffer_head holding the inode table block; the caller
 *	must brelse() it when done
 *
 * Returns a pointer into the buffer at the inode's offset, or an
 * ERR_PTR on a bad inode number, missing group descriptor, or read
 * failure.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	/* Only the root may sit below FIRST_INO (reserved inodes). */
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
1176
1177void ext2_set_inode_flags(struct inode *inode)
1178{
1179 unsigned int flags = EXT2_I(inode)->i_flags;
1180
1181 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
1182 if (flags & EXT2_SYNC_FL)
1183 inode->i_flags |= S_SYNC;
1184 if (flags & EXT2_APPEND_FL)
1185 inode->i_flags |= S_APPEND;
1186 if (flags & EXT2_IMMUTABLE_FL)
1187 inode->i_flags |= S_IMMUTABLE;
1188 if (flags & EXT2_NOATIME_FL)
1189 inode->i_flags |= S_NOATIME;
1190 if (flags & EXT2_DIRSYNC_FL)
1191 inode->i_flags |= S_DIRSYNC;
1192}
1193
1194
1195void ext2_get_inode_flags(struct ext2_inode_info *ei)
1196{
1197 unsigned int flags = ei->vfs_inode.i_flags;
1198
1199 ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
1200 EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
1201 if (flags & S_SYNC)
1202 ei->i_flags |= EXT2_SYNC_FL;
1203 if (flags & S_APPEND)
1204 ei->i_flags |= EXT2_APPEND_FL;
1205 if (flags & S_IMMUTABLE)
1206 ei->i_flags |= EXT2_IMMUTABLE_FL;
1207 if (flags & S_NOATIME)
1208 ei->i_flags |= EXT2_NOATIME_FL;
1209 if (flags & S_DIRSYNC)
1210 ei->i_flags |= EXT2_DIRSYNC_FL;
1211}
1212
/*
 * ext2_iget - read an inode from disk and set up the in-core inode
 * @sb: superblock
 * @ino: inode number
 *
 * Returns the (possibly already cached) inode, or an ERR_PTR.  Fills
 * in VFS fields from the raw on-disk inode and wires up the
 * inode/file/address_space operation tables according to the file
 * type and mount options (XIP, NOBH).
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		/* Already in the icache and fully set up. */
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
 		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit ids: combine the high and low 16-bit halves. */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;
	if (S_ISREG(inode->i_mode))
		/* Large files: i_dir_acl slot doubles as i_size_high. */
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (ext2_use_xip(inode->i_sb)) {
			inode->i_mapping->a_ops = &ext2_aops_xip;
			inode->i_fop = &ext2_xip_file_operations;
		} else if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext2_fast_symlink_inode_operations;
			/* Defend against corrupt, unterminated targets. */
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		/* Device numbers: old 16-bit format in block[0], new in [1]. */
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else 
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;
	
bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
1335
/*
 * ext2_write_inode - copy the in-core inode back to its disk buffer
 * @inode: inode to write
 * @do_sync: when non-zero, write the buffer synchronously and report
 *	I/O errors
 *
 * Marks the inode-table buffer dirty; actual writeback is left to the
 * block layer unless @do_sync.  Returns 0 or -EIO.
 */
int ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = inode->i_uid;
	gid_t gid = inode->i_gid;
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* For regular files the dir_acl slot is i_size_high. */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
			       /* If this is the first large file
				* created, add a flag to the superblock.
				*/
				lock_kernel();
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				unlock_kernel();
				ext2_write_super(sb);
			}
		}
	}
	
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers: old 16-bit format in block[0], new in [1]. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1440
/*
 * Synchronously write the inode to disk.  nr_to_write is 0 because
 * sync_inode() writes the inode itself, not its data pages.
 */
int ext2_sync_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,	/* sys_fsync did this */
	};
	return sync_inode(inode, &wbc);
}
1449
/*
 * ext2_setattr - ->setattr for ext2 inodes
 *
 * Validates the request, transfers quota if the owner changes, applies
 * the attributes (inode_setattr also truncates on ATTR_SIZE), and
 * updates the ACL-derived mode bits on chmod.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;
	/* Owner change: move the quota usage to the new uid/gid first. */
	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
		if (error)
			return error;
	}
	error = inode_setattr(inode, iattr);
	if (!error && (iattr->ia_valid & ATTR_MODE))
		error = ext2_acl_chmod(inode);
	return error;
}
1469