/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
#include "ext2.h"
#include "acl.h"
#include "xip.h"

MODULE_AUTHOR("Remy Card and others");
MODULE_DESCRIPTION("Second Extended Filesystem");
MODULE_LICENSE("GPL");

static int __ext2_write_inode(struct inode *inode, int do_sync);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT2_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, to, inode->i_size);
                ext2_truncate_blocks(inode, inode->i_size);
        }
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext2_evict_inode(struct inode *inode)
{
        struct ext2_block_alloc_info *rsv;
        int want_delete = 0;

        if (!inode->i_nlink && !is_bad_inode(inode)) {
                want_delete = 1;
                dquot_initialize(inode);
        } else {
                dquot_drop(inode);
        }

        truncate_inode_pages(&inode->i_data, 0);

        if (want_delete) {
                /* set dtime */
                EXT2_I(inode)->i_dtime = get_seconds();
                mark_inode_dirty(inode);
                __ext2_write_inode(inode, inode_needs_sync(inode));
                /* truncate to 0 */
                inode->i_size = 0;
                if (inode->i_blocks)
                        ext2_truncate_blocks(inode, 0);
        }

        invalidate_inode_buffers(inode);
        end_writeback(inode);

        ext2_discard_reservation(inode);
        rsv = EXT2_I(inode)->i_block_alloc_info;
        EXT2_I(inode)->i_block_alloc_info = NULL;
        if (unlikely(rsv))
                kfree(rsv);

        if (want_delete)
                ext2_free_inode(inode);
}

109
110typedef struct {
111 __le32 *p;
112 __le32 key;
113 struct buffer_head *bh;
114} Indirect;
115
116static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
117{
118 p->key = *(p->p = v);
119 p->bh = bh;
120}
121
122static inline int verify_chain(Indirect *from, Indirect *to)
123{
124 while (from <= to && from->key == *from->p)
125 from++;
126 return (from > to);
127}
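
/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data, ext2 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at the leaves and indirect blocks in the
 *	intermediate nodes. This function translates the block number into
 *	a path in that tree - the return value is the path length and
 *	@offsets[n] is the offset of the pointer to the (n+1)th node in the
 *	nth one. If @i_block is out of range (negative or too large), a
 *	warning is printed and zero is returned.
 *
 *	Worked example (an illustration, not part of the original source):
 *	with 1KiB blocks there are 256 pointers per indirect block, so
 *	logical block 300 is past the 12 direct slots (300 - 12 = 288) and
 *	past the single-indirect range (288 - 256 = 32), landing in the
 *	double-indirect tree: offsets[] = { EXT2_DIND_BLOCK, 32 >> 8 = 0,
 *	32 & 255 = 32 } and the returned depth is 3.
 *
 *	Note: this function doesn't find the node addresses, so no IO is
 *	needed. All we need to know is the capacity of indirect blocks
 *	(taken from inode->i_sb).
 */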
static int ext2_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT2_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext2_msg(inode->i_sb, KERN_WARNING,
                        "warning: %s: block < 0", __func__);
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT2_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT2_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT2_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext2_msg(inode->i_sb, KERN_WARNING,
                        "warning: %s: block is too big", __func__);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));

        return n;
}
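
/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. On return, chain[i].key contains the
 *	number of the (i+1)-th block in the chain (little-endian, as stored
 *	on disk), chain[i].p contains the address that number was read from
 *	(pointing into the inode for i == 0 and into bh->b_data for i > 0),
 *	and chain[i].bh points to the buffer_head of the i-th indirect block
 *	(%NULL for i == 0).
 *
 *	The function stops when it stumbles upon a zero pointer (absent
 *	block; *@err == 0), when it gets an IO error reading an indirect
 *	block (*@err == -EIO), or when it notices that the chain had been
 *	changed while it was reading (*@err == -EAGAIN). In all of those
 *	cases it returns the pointer to the last filled triple. It returns
 *	%NULL when it has read all @depth-1 indirect blocks and found the
 *	whole chain, all the way to the data.
 */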
static Indirect *ext2_get_branch(struct inode *inode,
                                 int depth,
                                 int *offsets,
                                 Indirect chain[4],
                                 int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                read_lock(&EXT2_I(inode)->i_meta_lock);
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                read_unlock(&EXT2_I(inode)->i_meta_lock);
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        read_unlock(&EXT2_I(inode)->i_meta_lock);
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}
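
/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same cylinder group.
 *
 *	In the latter case we colour the starting block by the callers PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group. The PID is used here so that
 *	functionally related files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */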
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
        struct ext2_inode_info *ei = EXT2_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext2_fsblk_t bg_start;
        ext2_fsblk_t colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred from inode itself? OK, just put it
         * into the same cylinder group then.
         */
        bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}
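
/**
 *	ext2_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Returns the preferred place for a block (the goal).
 */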
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
                                          Indirect *partial)
{
        struct ext2_block_alloc_info *block_i;

        block_i = EXT2_I(inode)->i_block_alloc_info;

        /*
         * Try the heuristic for sequential allocation,
         * failing that at least try to get decent locality.
         */
        if (block_i && (block == block_i->last_alloc_logical_block + 1)
            && (block_i->last_alloc_physical_block != 0)) {
                return block_i->last_alloc_physical_block + 1;
        }

        return ext2_find_near(inode, partial);
}
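
/**
 *	ext2_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	Returns the total number of blocks to allocate, including the
 *	direct and indirect blocks.
 */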
static int
ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
                      int blocks_to_boundary)
{
        unsigned long count = 0;

        /*
         * Simple case: the [t,d]indirect block(s) have not been allocated
         * yet, so it is clear that the blocks on that path have not been
         * allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross-boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary
                && le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
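
/**
 *	ext2_alloc_blocks - allocate the blocks needed for a branch,
 *	preferably in one go.
 *	@inode: owner
 *	@goal: preferred physical block for the allocation
 *	@indirect_blks: the number of blocks needed for indirect blocks
 *	@blks: the number of blocks needed for direct blocks
 *	@new_blocks: on return it stores the new block numbers for the
 *	indirect blocks (if needed) and the first direct block
 *	@err: here we store the error value
 *
 *	Returns the number of direct blocks allocated.
 */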
static int ext2_alloc_blocks(struct inode *inode,
                        ext2_fsblk_t goal, int indirect_blks, int blks,
                        ext2_fsblk_t new_blocks[4], int *err)
{
        int target, i;
        unsigned long count = 0;
        int index = 0;
        ext2_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the
         * minimum number of blocks we need to allocate (required).
         */
        target = blks + indirect_blks;

        while (1) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext2_new_blocks(inode, goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }

                if (count > 0)
                        break;
        }

        /* save the new block number for the first direct block */
        new_blocks[index] = current_block;

        /* total number of blocks allocated for direct blocks */
        ret = count;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext2_free_blocks(inode, new_blocks[i], 1);
        if (index)
                mark_inode_dirty(inode);
        return ret;
}
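
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: depth of the chain (number of indirect blocks)
 *	@blks: number of data blocks to allocate; on return, number allocated
 *	@goal: preferred place for the allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into a chain and, if we are synchronous, writes them to
 *	disk. In other words, it prepares a branch that can be spliced onto
 *	the inode. It stores the information about that chain in branch[],
 *	in the same format as ext2_get_branch() would do. We are calling it
 *	after we had read the existing part of the chain and partial points
 *	to the last triple of that (the one with zero ->key). Upon exit we
 *	have the same picture as after a successful ext2_get_branch(),
 *	except that in one place the chain is disconnected - *branch->p is
 *	still zero (we did not set the last link), but branch->key contains
 *	the number that should be placed into *branch->p to fill that gap.
 *
 *	If allocation fails, we free all the blocks we've allocated (and
 *	forget their buffer_heads) and return the error value.
 */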
static int ext2_alloc_branch(struct inode *inode,
                        int indirect_blks, int *blks, ext2_fsblk_t goal,
                        int *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext2_fsblk_t new_blocks[4];
        ext2_fsblk_t current_block;

        num = ext2_alloc_blocks(inode, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to the new one, then send
                 * the parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                if (unlikely(!bh)) {
                        /* sb_getblk() may fail under memory pressure;
                         * unwind instead of dereferencing a NULL bh. */
                        err = -ENOMEM;
                        goto failed;
                }
                branch[n].bh = bh;
                lock_buffer(bh);
                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
                mark_buffer_dirty_inode(bh, inode);
                /* We used to sync bh here if IS_SYNC(inode).
                 * But we now rely upon generic_write_sync()
                 * and b_inode_buffers.  But not for directories.
                 */
                if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                        sync_dirty_buffer(bh);
        }
        *blks = num;
        return err;

failed:
        /* free all the allocated blocks and forget their buffer_heads */
        for (i = 1; i < n; i++)
                bforget(branch[i].bh);
        for (i = 0; i < indirect_blks; i++)
                ext2_free_blocks(inode, new_blocks[i], 1);
        ext2_free_blocks(inode, new_blocks[indirect_blks], num);
        return err;
}
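
/**
 *	ext2_splice_branch - splice the allocated branch onto inode.
 *	@inode: owner
 *	@block: (logical) number of block we are adding
 *	@where: location of missing link
 *	@num: number of indirect blocks we are adding
 *	@blks: number of direct blocks we are adding
 *
 *	This function fills the missing link and does all housekeeping needed
 *	in inode (->i_blocks, etc.). In case of success we end up with the
 *	full chain to the new block.
 */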
static void ext2_splice_branch(struct inode *inode,
                        long block, Indirect *where, int num, int blks)
{
        int i;
        struct ext2_block_alloc_info *block_i;
        ext2_fsblk_t current_block;

        block_i = EXT2_I(inode)->i_block_alloc_info;

        /* XXX LOCKING probably should have i_meta_lock ? */
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to more just
         * allocated direct blocks.
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /*
         * Update the most recently allocated logical & physical block in
         * i_block_alloc_info, to assist finding the proper goal block for
         * the next allocation.
         */
        if (block_i) {
                block_i->last_alloc_logical_block = block + blks - 1;
                block_i->last_alloc_physical_block =
                                le32_to_cpu(where[num].key) + blks - 1;
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        /* had we spliced it onto indirect block? */
        if (where->bh)
                mark_buffer_dirty_inode(where->bh, inode);

        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
}
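
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from the inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */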
static int ext2_get_blocks(struct inode *inode,
                           sector_t iblock, unsigned long maxblocks,
                           struct buffer_head *bh_result,
                           int create)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext2_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext2_inode_info *ei = EXT2_I(inode);
        int count = 0;
        ext2_fsblk_t first_block = 0;

        depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

        if (depth == 0)
                return (err);

        partial = ext2_get_branch(inode, depth, offsets, chain, &err);
        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext2_fsblk_t blk;

                        if (!verify_chain(chain, chain + depth - 1)) {
                                /*
                                 * Indirect block might be removed by
                                 * truncate while we were reading it.
                                 * Handling of that case: forget what we've
                                 * got now, go to reread.
                                 */
                                err = -EAGAIN;
                                count = 0;
                                break;
                        }
                        blk = le32_to_cpu(*(chain[depth-1].p + count));
                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                if (err != -EAGAIN)
                        goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;

        mutex_lock(&ei->truncate_mutex);
        /*
         * If the indirect block is missing while we are reading
         * the chain (ext2_get_branch() returns -EAGAIN err), or
         * if the chain has been changed after we grab the semaphore,
         * (either because another process truncated this branch, or
         * another get_block allocated this branch) re-grab the chain to
         * see if the requested block has been allocated or not.
         *
         * Since we already block the truncate/other get_block
         * at this point, we will have the current copy of the chain when
         * we splice the branch into the tree.
         */
        if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
                        brelse(partial->bh);
                        partial--;
                }
                partial = ext2_get_branch(inode, depth, offsets, chain, &err);
                if (!partial) {
                        count++;
                        mutex_unlock(&ei->truncate_mutex);
                        if (err)
                                goto cleanup;
                        clear_buffer_new(bh_result);
                        goto got_it;
                }
        }

        /*
         * Okay, we need to do block allocation.  Set up the block
         * reservation/allocation info if this is the first allocation
         * for a regular file.
         */
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
                ext2_init_block_alloc_info(inode);

        goal = ext2_find_goal(inode, iblock, partial);

        /* the number of blocks we need to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;
        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext2_blks_to_allocate(partial, indirect_blks,
                                        maxblocks, blocks_to_boundary);
        /*
         * XXX ???? Block out ext2_truncate while we alter the tree
         */
        err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);

        if (err) {
                mutex_unlock(&ei->truncate_mutex);
                goto cleanup;
        }

        if (ext2_use_xip(inode->i_sb)) {
                /*
                 * we need to clear the block
                 */
                err = ext2_clear_xip_target(inode,
                        le32_to_cpu(chain[depth-1].key));
                if (err) {
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
        }

        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                brelse(partial->bh);
                partial--;
        }
        return err;
}

int ext2_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int ret = ext2_get_blocks(inode, iblock, max_blocks,
                                  bh_result, create);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        return ret;
}

int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
        return generic_block_fiemap(inode, fieinfo, start, len,
                                    ext2_get_block);
}

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, ext2_get_block);
}

static int
ext2_readpages(struct file *file, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}

static int
ext2_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                ext2_get_block);
        if (ret < 0)
                ext2_write_failed(mapping, pos + len);
        return ret;
}

static int ext2_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret < len)
                ext2_write_failed(mapping, pos + len);
        return ret;
}

static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        int ret;

        ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                               ext2_get_block);
        if (ret < 0)
                ext2_write_failed(mapping, pos + len);
        return ret;
}

static int ext2_nobh_writepage(struct page *page,
                               struct writeback_control *wbc)
{
        return nobh_writepage(page, ext2_get_block, wbc);
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ext2_get_block);
}

static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;

        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
                                 iov, offset, nr_segs, ext2_get_block, NULL);
        if (ret < 0 && (rw & WRITE))
                ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
        return ret;
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, ext2_get_block);
}

const struct address_space_operations ext2_aops = {
        .readpage               = ext2_readpage,
        .readpages              = ext2_readpages,
        .writepage              = ext2_writepage,
        .write_begin            = ext2_write_begin,
        .write_end              = ext2_write_end,
        .bmap                   = ext2_bmap,
        .direct_IO              = ext2_direct_IO,
        .writepages             = ext2_writepages,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};

const struct address_space_operations ext2_aops_xip = {
        .bmap                   = ext2_bmap,
        .get_xip_mem            = ext2_get_xip_mem,
};

const struct address_space_operations ext2_nobh_aops = {
        .readpage               = ext2_readpage,
        .readpages              = ext2_readpages,
        .writepage              = ext2_nobh_writepage,
        .write_begin            = ext2_nobh_write_begin,
        .write_end              = nobh_write_end,
        .bmap                   = ext2_bmap,
        .direct_IO              = ext2_direct_IO,
        .writepages             = ext2_writepages,
        .migratepage            = buffer_migrate_page,
        .error_remove_page      = generic_error_remove_page,
};
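
/*
 * Return 1 if the range [p, q) contains only zero pointers.
 * (Probably this should be a library function - search for the first
 * non-zero word, or memcmp with a zero page, whatever is better for a
 * particular architecture.)
 */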
static inline int all_zeroes(__le32 *p, __le32 *q)
{
        while (p < q)
                if (*p++)
                        return 0;
        return 1;
}
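
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode: inode in question
 *	@depth: depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain: place to store the pointers to partial indirect blocks
 *	@top: place to put the (detached) top of the branch
 *
 *	This is a helper function used by the truncate path.
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block). We have to free the top of that path along with
 *	everything to the right of the path. Since no allocation past the
 *	truncation point is possible until the truncate finishes, we may
 *	safely do the latter, but the top of the branch may require special
 *	attention - pageout below the truncation point might try to
 *	populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks in @chain[].bh, and pointers to their
 *	last elements that should not be removed in @chain[].p. The return
 *	value is the pointer to the last filled element of @chain.
 *
 *	The work left to the caller:
 *	  a) free the subtree growing from *@top
 *	  b) free the subtrees growing from
 *	     (@chain[i].p + 1 .. end of @chain[i].bh->b_data)
 *	  c) free the subtrees growing from the inode past @chain[0].p
 *	     (no partially truncated stuff there).
 */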
static Indirect *ext2_find_shared(struct inode *inode,
                                  int depth,
                                  int offsets[4],
                                  Indirect chain[4],
                                  __le32 *top)
{
        Indirect *partial, *p;
        int k, err;

        *top = 0;
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext2_get_branch(inode, k, offsets, chain, &err);
        if (!partial)
                partial = chain + k-1;
        /*
         * If the branch acquired continuation since we've looked at it -
         * fine, it should all survive and (new) top doesn't belong to us.
         */
        write_lock(&EXT2_I(inode)->i_meta_lock);
        if (!partial->key && *partial->p) {
                write_unlock(&EXT2_I(inode)->i_meta_lock);
                goto no_top;
        }
        for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
                ;
        /*
         * OK, we've found the last block that must survive. The rest of our
         * branch should be detached before unlocking. However, if that rest
         * of branch is all ours and does not grow immediately from the inode
         * it's easier to cheat and just decrement partial->p.
         */
        if (p == chain + k - 1 && p > chain) {
                p->p--;
        } else {
                *top = *p->p;
                *p->p = 0;
        }
        write_unlock(&EXT2_I(inode)->i_meta_lock);

        while (partial > p) {
                brelse(partial->bh);
                partial--;
        }
no_top:
        return partial;
}
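
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode: inode we are dealing with
 *	@p: array of block numbers
 *	@q: points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */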
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
        unsigned long block_to_free = 0, count = 0;
        unsigned long nr;

        for ( ; p < q ; p++) {
                nr = le32_to_cpu(*p);
                if (nr) {
                        *p = 0;
                        /* accumulate blocks to free if they're contiguous */
                        if (count == 0)
                                goto free_this;
                        else if (block_to_free == nr - count)
                                count++;
                        else {
                                ext2_free_blocks(inode, block_to_free, count);
                                mark_inode_dirty(inode);
                        free_this:
                                block_to_free = nr;
                                count = 1;
                        }
                }
        }
        if (count > 0) {
                ext2_free_blocks(inode, block_to_free, count);
                mark_inode_dirty(inode);
        }
}
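
/**
 *	ext2_free_branches - free an array of branches
 *	@inode: inode we are dealing with
 *	@p: array of block numbers
 *	@q: pointer immediately past the end of array
 *	@depth: depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */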
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
        struct buffer_head *bh;
        unsigned long nr;

        if (depth--) {
                int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
                for ( ; p < q ; p++) {
                        nr = le32_to_cpu(*p);
                        if (!nr)
                                continue;
                        *p = 0;
                        bh = sb_bread(inode->i_sb, nr);
                        /*
                         * A read failure? Report error and clear slot
                         * (should be rare).
                         */
                        if (!bh) {
                                ext2_error(inode->i_sb, "ext2_free_branches",
                                        "Read failure, inode=%ld, block=%ld",
                                        inode->i_ino, nr);
                                continue;
                        }
                        ext2_free_branches(inode,
                                           (__le32 *)bh->b_data,
                                           (__le32 *)bh->b_data + addr_per_block,
                                           depth);
                        bforget(bh);
                        ext2_free_blocks(inode, nr, 1);
                        mark_inode_dirty(inode);
                }
        } else
                ext2_free_data(inode, p, q);
}

static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
        __le32 *i_data = EXT2_I(inode)->i_data;
        struct ext2_inode_info *ei = EXT2_I(inode);
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        __le32 nr = 0;
        int n;
        long iblock;
        unsigned blocksize;

        blocksize = inode->i_sb->s_blocksize;
        iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

        n = ext2_block_to_path(inode, iblock, offsets, NULL);
        if (n == 0)
                return;

        /*
         * From here we block out all ext2_get_block() callers who want to
         * modify the block allocation tree.
         */
        mutex_lock(&ei->truncate_mutex);

        if (n == 1) {
                ext2_free_data(inode, i_data+offsets[0],
                                i_data + EXT2_NDIR_BLOCKS);
                goto do_indirects;
        }

        partial = ext2_find_shared(inode, n, offsets, chain, &nr);

        /* Kill the top of shared branch (already detached) */
        if (nr) {
                if (partial == chain)
                        mark_inode_dirty(inode);
                else
                        mark_buffer_dirty_inode(partial->bh, inode);
                ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
        }
        /* Clear the ends of indirect blocks on the shared branch */
        while (partial > chain) {
                ext2_free_branches(inode,
                                   partial->p + 1,
                                   (__le32 *)partial->bh->b_data + addr_per_block,
                                   (chain+n-1) - partial);
                mark_buffer_dirty_inode(partial->bh, inode);
                brelse(partial->bh);
                partial--;
        }
do_indirects:
        /* Kill the remaining (whole) subtrees */
        switch (offsets[0]) {
                default:
                        nr = i_data[EXT2_IND_BLOCK];
                        if (nr) {
                                i_data[EXT2_IND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 1);
                        }
                        /* fall through */
                case EXT2_IND_BLOCK:
                        nr = i_data[EXT2_DIND_BLOCK];
                        if (nr) {
                                i_data[EXT2_DIND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 2);
                        }
                        /* fall through */
                case EXT2_DIND_BLOCK:
                        nr = i_data[EXT2_TIND_BLOCK];
                        if (nr) {
                                i_data[EXT2_TIND_BLOCK] = 0;
                                mark_inode_dirty(inode);
                                ext2_free_branches(inode, &nr, &nr+1, 3);
                        }
                        /* fall through */
                case EXT2_TIND_BLOCK:
                        ;
        }

        ext2_discard_reservation(inode);

        mutex_unlock(&ei->truncate_mutex);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
        /*
         * XXX: it seems like a bug here that we don't allow
         * IS_APPEND inode to have blocks-past-i_size trimmed off.
         * review and fix this.
         *
         * Also would be nice to be able to handle IO errors and such,
         * but that's probably too much to ask.
         */
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)))
                return;
        if (ext2_inode_is_fast_symlink(inode))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;
        __ext2_truncate_blocks(inode, offset);
}

static int ext2_setsize(struct inode *inode, loff_t newsize)
{
        int error;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)))
                return -EINVAL;
        if (ext2_inode_is_fast_symlink(inode))
                return -EINVAL;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return -EPERM;

        if (mapping_is_xip(inode->i_mapping))
                error = xip_truncate_page(inode->i_mapping, newsize);
        else if (test_opt(inode->i_sb, NOBH))
                error = nobh_truncate_page(inode->i_mapping,
                                           newsize, ext2_get_block);
        else
                error = block_truncate_page(inode->i_mapping,
                                            newsize, ext2_get_block);
        if (error)
                return error;

        truncate_setsize(inode, newsize);
        __ext2_truncate_blocks(inode, newsize);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
        if (inode_needs_sync(inode)) {
                sync_mapping_buffers(inode->i_mapping);
                sync_inode_metadata(inode, 1);
        } else {
                mark_inode_dirty(inode);
        }

        return 0;
}

static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
                                         struct buffer_head **p)
{
        struct buffer_head *bh;
        unsigned long block_group;
        unsigned long block;
        unsigned long offset;
        struct ext2_group_desc *gdp;

        *p = NULL;
        if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
            ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
                goto Einval;

        block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
        gdp = ext2_get_group_desc(sb, block_group, NULL);
        if (!gdp)
                goto Egdp;
        /*
         * Figure out the offset within the block group inode table
         */
        offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
        block = le32_to_cpu(gdp->bg_inode_table) +
                (offset >> EXT2_BLOCK_SIZE_BITS(sb));
        if (!(bh = sb_bread(sb, block)))
                goto Eio;

        *p = bh;
        offset &= (EXT2_BLOCK_SIZE(sb) - 1);
        return (struct ext2_inode *) (bh->b_data + offset);

Einval:
        ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
                   (unsigned long) ino);
        return ERR_PTR(-EINVAL);
Eio:
        ext2_error(sb, "ext2_get_inode",
                   "unable to read inode block - inode=%lu, block=%lu",
                   (unsigned long) ino, block);
Egdp:
        return ERR_PTR(-EIO);
}

void ext2_set_inode_flags(struct inode *inode)
{
        unsigned int flags = EXT2_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
        if (flags & EXT2_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & EXT2_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & EXT2_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
        if (flags & EXT2_NOATIME_FL)
                inode->i_flags |= S_NOATIME;
        if (flags & EXT2_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
void ext2_get_inode_flags(struct ext2_inode_info *ei)
{
        unsigned int flags = ei->vfs_inode.i_flags;

        ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
                        EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
        if (flags & S_SYNC)
                ei->i_flags |= EXT2_SYNC_FL;
        if (flags & S_APPEND)
                ei->i_flags |= EXT2_APPEND_FL;
        if (flags & S_IMMUTABLE)
                ei->i_flags |= EXT2_IMMUTABLE_FL;
        if (flags & S_NOATIME)
                ei->i_flags |= EXT2_NOATIME_FL;
        if (flags & S_DIRSYNC)
                ei->i_flags |= EXT2_DIRSYNC_FL;
}

struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
{
        struct ext2_inode_info *ei;
        struct buffer_head *bh;
        struct ext2_inode *raw_inode;
        struct inode *inode;
        long ret = -EIO;
        int n;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ei = EXT2_I(inode);
        ei->i_block_alloc_info = NULL;

        raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
        if (IS_ERR(raw_inode)) {
                ret = PTR_ERR(raw_inode);
                goto bad_inode;
        }

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
        if (!(test_opt(inode->i_sb, NO_UID32))) {
                inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
                inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
        }
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le32_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
         * This is needed because nfsd might try to access dead inodes
         * the test is that same one that e2fsck uses
         * NeilBrown 1999oct15
         */
        if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
                /* this inode is deleted */
                brelse(bh);
                ret = -ESTALE;
                goto bad_inode;
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ei->i_dir_acl = 0;
        if (S_ISREG(inode->i_mode))
                inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
        else
                ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
        ei->i_dtime = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_state = 0;
        ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
        ei->i_dir_start_lookup = 0;

        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
         */
        for (n = 0; n < EXT2_N_BLOCKS; n++)
                ei->i_data[n] = raw_inode->i_block[n];

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext2_file_inode_operations;
                if (ext2_use_xip(inode->i_sb)) {
                        inode->i_mapping->a_ops = &ext2_aops_xip;
                        inode->i_fop = &ext2_xip_file_operations;
                } else if (test_opt(inode->i_sb, NOBH)) {
                        inode->i_mapping->a_ops = &ext2_nobh_aops;
                        inode->i_fop = &ext2_file_operations;
                } else {
                        inode->i_mapping->a_ops = &ext2_aops;
                        inode->i_fop = &ext2_file_operations;
                }
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ext2_dir_inode_operations;
                inode->i_fop = &ext2_dir_operations;
                if (test_opt(inode->i_sb, NOBH))
                        inode->i_mapping->a_ops = &ext2_nobh_aops;
                else
                        inode->i_mapping->a_ops = &ext2_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (ext2_inode_is_fast_symlink(inode)) {
                        inode->i_op = &ext2_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                          sizeof(ei->i_data) - 1);
                } else {
                        inode->i_op = &ext2_symlink_inode_operations;
                        if (test_opt(inode->i_sb, NOBH))
                                inode->i_mapping->a_ops = &ext2_nobh_aops;
                        else
                                inode->i_mapping->a_ops = &ext2_aops;
                }
        } else {
                inode->i_op = &ext2_special_inode_operations;
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
                                old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
                else
                        init_special_inode(inode, inode->i_mode,
                                new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse(bh);
        ext2_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(ret);
}

static int __ext2_write_inode(struct inode *inode, int do_sync)
{
        struct ext2_inode_info *ei = EXT2_I(inode);
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        uid_t uid = inode->i_uid;
        gid_t gid = inode->i_gid;
        struct buffer_head *bh;
        struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
        int n;
        int err = 0;

        if (IS_ERR(raw_inode))
                return -EIO;

        /* For fields not tracked in the in-memory inode,
         * initialise them to zero for new inodes. */
        if (ei->i_state & EXT2_STATE_NEW)
                memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

        ext2_get_inode_flags(ei);
        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        if (!(test_opt(sb, NO_UID32))) {
                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
                raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
                /*
                 * Fix up interoperability with old kernels. Otherwise,
                 * old inodes get re-used with the upper 16 bits of the
                 * uid/gid intact.
                 */
                if (!ei->i_dtime) {
                        raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
                        raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
                } else {
                        raw_inode->i_uid_high = 0;
                        raw_inode->i_gid_high = 0;
                }
        } else {
                raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
                raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
        }
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le32(inode->i_size);
        raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

        raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
        raw_inode->i_flags = cpu_to_le32(ei->i_flags);
        raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
        raw_inode->i_frag = ei->i_frag_no;
        raw_inode->i_fsize = ei->i_frag_size;
        raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
        if (!S_ISREG(inode->i_mode))
                raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
        else {
                raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
                if (inode->i_size > 0x7fffffffULL) {
                        if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
                            EXT2_SB(sb)->s_es->s_rev_level ==
                                        cpu_to_le32(EXT2_GOOD_OLD_REV)) {
                                /* If this is the first large file
                                 * created, add a flag to the superblock.
                                 */
                                spin_lock(&EXT2_SB(sb)->s_lock);
                                ext2_update_dynamic_rev(sb);
                                EXT2_SET_RO_COMPAT_FEATURE(sb,
                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
                                spin_unlock(&EXT2_SB(sb)->s_lock);
                                ext2_write_super(sb);
                        }
                }
        }

        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        raw_inode->i_block[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        raw_inode->i_block[1] = 0;
                } else {
                        raw_inode->i_block[0] = 0;
                        raw_inode->i_block[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        raw_inode->i_block[2] = 0;
                }
        } else for (n = 0; n < EXT2_N_BLOCKS; n++)
                raw_inode->i_block[n] = ei->i_data[n];
        mark_buffer_dirty(bh);
        if (do_sync) {
                sync_dirty_buffer(bh);
                if (buffer_req(bh) && !buffer_uptodate(bh)) {
                        printk("IO error syncing ext2 inode [%s:%08lx]\n",
                               sb->s_id, (unsigned long) ino);
                        err = -EIO;
                }
        }
        ei->i_state &= ~EXT2_STATE_NEW;
        brelse(bh);
        return err;
}

int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, iattr);
        if (error)
                return error;

        if (is_quota_modification(inode, iattr))
                dquot_initialize(inode);
        if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
            (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
                error = dquot_transfer(inode, iattr);
                if (error)
                        return error;
        }
        if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
                error = ext2_setsize(inode, iattr->ia_size);
                if (error)
                        return error;
        }
        setattr_copy(inode, iattr);
        if (iattr->ia_valid & ATTR_MODE)
                error = ext2_acl_chmod(inode);
        mark_inode_dirty(inode);

        return error;
}