1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/time.h>
26#include <linux/highuid.h>
27#include <linux/pagemap.h>
28#include <linux/quotaops.h>
29#include <linux/writeback.h>
30#include <linux/buffer_head.h>
31#include <linux/mpage.h>
32#include <linux/fiemap.h>
33#include <linux/namei.h>
34#include <linux/aio.h>
35#include "ext2.h"
36#include "acl.h"
37#include "xip.h"
38#include "xattr.h"
39
40static int __ext2_write_inode(struct inode *inode, int do_sync);
41
42
43
44
45static inline int ext2_inode_is_fast_symlink(struct inode *inode)
46{
47 int ea_blocks = EXT2_I(inode)->i_file_acl ?
48 (inode->i_sb->s_blocksize >> 9) : 0;
49
50 return (S_ISLNK(inode->i_mode) &&
51 inode->i_blocks - ea_blocks == 0);
52}
53
54static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
55
56static void ext2_write_failed(struct address_space *mapping, loff_t to)
57{
58 struct inode *inode = mapping->host;
59
60 if (to > inode->i_size) {
61 truncate_pagecache(inode, to, inode->i_size);
62 ext2_truncate_blocks(inode, inode->i_size);
63 }
64}
65
66
67
68
69void ext2_evict_inode(struct inode * inode)
70{
71 struct ext2_block_alloc_info *rsv;
72 int want_delete = 0;
73
74 if (!inode->i_nlink && !is_bad_inode(inode)) {
75 want_delete = 1;
76 dquot_initialize(inode);
77 } else {
78 dquot_drop(inode);
79 }
80
81 truncate_inode_pages(&inode->i_data, 0);
82
83 if (want_delete) {
84 sb_start_intwrite(inode->i_sb);
85
86 EXT2_I(inode)->i_dtime = get_seconds();
87 mark_inode_dirty(inode);
88 __ext2_write_inode(inode, inode_needs_sync(inode));
89
90 inode->i_size = 0;
91 if (inode->i_blocks)
92 ext2_truncate_blocks(inode, 0);
93 ext2_xattr_delete_inode(inode);
94 }
95
96 invalidate_inode_buffers(inode);
97 clear_inode(inode);
98
99 ext2_discard_reservation(inode);
100 rsv = EXT2_I(inode)->i_block_alloc_info;
101 EXT2_I(inode)->i_block_alloc_info = NULL;
102 if (unlikely(rsv))
103 kfree(rsv);
104
105 if (want_delete) {
106 ext2_free_inode(inode);
107 sb_end_intwrite(inode->i_sb);
108 }
109}
110
/*
 * One element of a chain of indirect block pointers:
 *   @p   - address of the block-number slot (in i_data[] or in an
 *          indirect block's buffer)
 *   @key - the block number read from *p at lookup time (used later to
 *          detect concurrent modification via verify_chain())
 *   @bh  - buffer head holding the indirect block, or NULL for the
 *          slot inside the inode itself
 */
typedef struct {
    __le32 *p;
    __le32 key;
    struct buffer_head *bh;
} Indirect;
116
117static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
118{
119 p->key = *(p->p = v);
120 p->bh = bh;
121}
122
123static inline int verify_chain(Indirect *from, Indirect *to)
124{
125 while (from <= to && from->key == *from->p)
126 from++;
127 return (from > to);
128}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/**
 * ext2_block_to_path - parse a logical block number into indirection offsets
 * @inode: inode in question
 * @i_block: logical block number to be parsed
 * @offsets: array (4 slots) that receives the offsets at each level
 * @boundary: if non-NULL, receives the number of further blocks that can
 *            be mapped before crossing an indirect-block boundary
 *
 * Classifies @i_block as direct, indirect, double- or triple-indirect
 * and fills @offsets with the slot index at each level.  Returns the
 * chain depth (1..4), or 0 if the block number is out of range.
 *
 * Note: @i_block is decremented in place inside the condition chain,
 * so after each "else if" it is relative to the start of that region.
 */
static int ext2_block_to_path(struct inode *inode,
            long i_block, int offsets[4], int *boundary)
{
    int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
    int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
    const long direct_blocks = EXT2_NDIR_BLOCKS,
        indirect_blocks = ptrs,
        double_blocks = (1 << (ptrs_bits * 2));
    int n = 0;
    int final = 0;

    if (i_block < 0) {
        ext2_msg(inode->i_sb, KERN_WARNING,
            "warning: %s: block < 0", __func__);
    } else if (i_block < direct_blocks) {
        /* direct block: single level */
        offsets[n++] = i_block;
        final = direct_blocks;
    } else if ( (i_block -= direct_blocks) < indirect_blocks) {
        /* single indirect */
        offsets[n++] = EXT2_IND_BLOCK;
        offsets[n++] = i_block;
        final = ptrs;
    } else if ((i_block -= indirect_blocks) < double_blocks) {
        /* double indirect */
        offsets[n++] = EXT2_DIND_BLOCK;
        offsets[n++] = i_block >> ptrs_bits;
        offsets[n++] = i_block & (ptrs - 1);
        final = ptrs;
    } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
        /* triple indirect */
        offsets[n++] = EXT2_TIND_BLOCK;
        offsets[n++] = i_block >> (ptrs_bits * 2);
        offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
        offsets[n++] = i_block & (ptrs - 1);
        final = ptrs;
    } else {
        ext2_msg(inode->i_sb, KERN_WARNING,
            "warning: %s: block is too big", __func__);
    }
    /* blocks remaining in the last-level indirect block after this one */
    if (boundary)
        *boundary = final - 1 - (i_block & (ptrs - 1));

    return n;
}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/**
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: chain depth as returned by ext2_block_to_path()
 * @offsets: per-level slot offsets from ext2_block_to_path()
 * @chain: array that receives the chain elements
 * @err: receives the error code
 *
 * Walks the indirection chain, reading each indirect block.  Returns
 * NULL if the whole chain is mapped (chain[depth-1].key is the data
 * block), otherwise a pointer to the first unmapped/failed element.
 * On failure *err is -EIO (read error) or -EAGAIN (chain changed
 * underneath us; caller should retry).  Buffer heads of the partial
 * chain are held and must be released by the caller.
 */
static Indirect *ext2_get_branch(struct inode *inode,
            int depth,
            int *offsets,
            Indirect chain[4],
            int *err)
{
    struct super_block *sb = inode->i_sb;
    Indirect *p = chain;
    struct buffer_head *bh;

    *err = 0;
    /* i_data is not going away, no lock needed for the first level */
    add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
    if (!p->key)
        goto no_block;
    while (--depth) {
        bh = sb_bread(sb, le32_to_cpu(p->key));
        if (!bh)
            goto failure;
        /*
         * Truncate may have changed the chain while we slept in
         * sb_bread(); re-validate under i_meta_lock before trusting
         * the pointer we are about to follow.
         */
        read_lock(&EXT2_I(inode)->i_meta_lock);
        if (!verify_chain(chain, p))
            goto changed;
        add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
        read_unlock(&EXT2_I(inode)->i_meta_lock);
        if (!p->key)
            goto no_block;
    }
    return NULL;

changed:
    read_unlock(&EXT2_I(inode)->i_meta_lock);
    brelse(bh);
    *err = -EAGAIN;
    goto no_block;
failure:
    *err = -EIO;
no_block:
    return p;
}
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
292{
293 struct ext2_inode_info *ei = EXT2_I(inode);
294 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
295 __le32 *p;
296 ext2_fsblk_t bg_start;
297 ext2_fsblk_t colour;
298
299
300 for (p = ind->p - 1; p >= start; p--)
301 if (*p)
302 return le32_to_cpu(*p);
303
304
305 if (ind->bh)
306 return ind->bh->b_blocknr;
307
308
309
310
311
312 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
313 colour = (current->pid % 16) *
314 (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
315 return bg_start + colour;
316}
317
318
319
320
321
322
323
324
325
326
327static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
328 Indirect *partial)
329{
330 struct ext2_block_alloc_info *block_i;
331
332 block_i = EXT2_I(inode)->i_block_alloc_info;
333
334
335
336
337
338 if (block_i && (block == block_i->last_alloc_logical_block + 1)
339 && (block_i->last_alloc_physical_block != 0)) {
340 return block_i->last_alloc_physical_block + 1;
341 }
342
343 return ext2_find_near(inode, partial);
344}
345
346
347
348
349
350
351
352
353
354
355
356
357
358static int
359ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
360 int blocks_to_boundary)
361{
362 unsigned long count = 0;
363
364
365
366
367
368 if (k > 0) {
369
370 if (blks < blocks_to_boundary + 1)
371 count += blks;
372 else
373 count += blocks_to_boundary + 1;
374 return count;
375 }
376
377 count++;
378 while (count < blks && count <= blocks_to_boundary
379 && le32_to_cpu(*(branch[0].p + count)) == 0) {
380 count++;
381 }
382 return count;
383}
384
385
386
387
388
389
390
391
392
393
394
/*
 * ext2_alloc_blocks: allocate @indirect_blks metadata blocks plus at
 * least one (ideally up to @blks) data blocks, storing the block
 * numbers in @new_blocks[].  The indirect blocks are filled first;
 * new_blocks[indirect_blks] ends up holding the first data block and
 * the remaining data blocks (if any) are contiguous after it.
 *
 * Returns the number of data blocks allocated; on failure returns 0
 * with *err set, after releasing anything already allocated.
 */
static int ext2_alloc_blocks(struct inode *inode,
            ext2_fsblk_t goal, int indirect_blks, int blks,
            ext2_fsblk_t new_blocks[4], int *err)
{
    int target, i;
    unsigned long count = 0;
    int index = 0;
    ext2_fsblk_t current_block = 0;
    int ret = 0;

    /*
     * Total number of blocks we need.  ext2_new_blocks() may return
     * fewer than asked for, so loop until the indirect blocks are all
     * covered and at least one data block has been obtained.
     */
    target = blks + indirect_blks;

    while (1) {
        count = target;
        /* allocating blocks for indirect blocks and direct blocks */
        current_block = ext2_new_blocks(inode,goal,&count,err);
        if (*err)
            goto failed_out;

        target -= count;
        /* store the blocks for the missing indirect blocks first */
        while (index < indirect_blks && count) {
            new_blocks[index++] = current_block++;
            count--;
        }

        /* anything left over is data blocks - we are done */
        if (count > 0)
            break;
    }

    /* save the first data block number */
    new_blocks[index] = current_block;

    /* total number of data blocks allocated */
    ret = count;
    *err = 0;
    return ret;
failed_out:
    /* undo: free whatever indirect blocks were already obtained */
    for (i = 0; i <index; i++)
        ext2_free_blocks(inode, new_blocks[i], 1);
    if (index)
        mark_inode_dirty(inode);
    return ret;
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
/*
 * ext2_alloc_branch - allocate and link together a branch of the chain
 * @inode: owner
 * @indirect_blks: number of missing indirect blocks to allocate
 * @blks: in - desired number of data blocks; out - number obtained
 * @goal: preferred place for the allocation
 * @offsets: slot offsets for the missing levels
 * @branch: chain elements to fill in (branch[0] already points at the
 *          slot in the existing chain where this branch will attach)
 *
 * Allocates the blocks, zeroes the new indirect blocks and wires the
 * branch together bottom-up inside its own buffers.  The branch is NOT
 * yet visible from the inode - ext2_splice_branch() does that - so no
 * locking against readers is needed here.  Returns 0 on success.
 */
static int ext2_alloc_branch(struct inode *inode,
            int indirect_blks, int *blks, ext2_fsblk_t goal,
            int *offsets, Indirect *branch)
{
    int blocksize = inode->i_sb->s_blocksize;
    int i, n = 0;
    int err = 0;
    struct buffer_head *bh;
    int num;
    ext2_fsblk_t new_blocks[4];
    ext2_fsblk_t current_block;

    num = ext2_alloc_blocks(inode, goal, indirect_blks,
                *blks, new_blocks, &err);
    if (err)
        return err;

    branch[0].key = cpu_to_le32(new_blocks[0]);
    /*
     * metadata blocks and data blocks are allocated.
     */
    for (n = 1; n <= indirect_blks; n++) {
        /*
         * Get buffer_head for the new indirect block, zero it and
         * set the pointer to the next level (or to the data blocks
         * at the deepest level).
         */
        bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
        if (unlikely(!bh)) {
            err = -ENOMEM;
            goto failed;
        }
        branch[n].bh = bh;
        lock_buffer(bh);
        memset(bh->b_data, 0, blocksize);
        branch[n].p = (__le32 *) bh->b_data + offsets[n];
        branch[n].key = cpu_to_le32(new_blocks[n]);
        *branch[n].p = branch[n].key;
        if ( n == indirect_blks) {
            current_block = new_blocks[n];
            /*
             * The deepest indirect block gets all the data block
             * numbers; they are contiguous after the first one.
             */
            for (i=1; i < num; i++)
                *(branch[n].p + i) = cpu_to_le32(++current_block);
        }
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty_inode(bh, inode);
        /*
         * We used to sync bh here if IS_SYNC(inode).  But we now rely
         * upon generic_write_sync() and b_inode_buffers; for dirsync
         * directories the indirect block must hit disk now.
         */
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
            sync_dirty_buffer(bh);
    }
    *blks = num;
    return err;

failed:
    /* unwind: forget the buffers, then free indirect + data blocks.
     * After the second loop i == indirect_blks, so the last call frees
     * the num data blocks starting at new_blocks[indirect_blks]. */
    for (i = 1; i < n; i++)
        bforget(branch[i].bh);
    for (i = 0; i < indirect_blks; i++)
        ext2_free_blocks(inode, new_blocks[i], 1);
    ext2_free_blocks(inode, new_blocks[i], num);
    return err;
}
542
543
544
545
546
547
548
549
550
551
552
553
554
/**
 * ext2_splice_branch - splice the allocated branch onto the inode
 * @inode: owner
 * @block: logical number of the first block in the branch
 * @where: location of the missing link (where->p is the slot to fill)
 * @num: number of indirect blocks in the branch (0 when the data
 *       blocks attach directly to an existing indirect block)
 * @blks: number of data blocks being mapped
 *
 * Makes the new branch visible by storing its first block number into
 * the parent slot, fills in any extra direct pointers, updates the
 * allocation-info hints and dirties the affected buffer/inode.
 * Caller holds truncate_mutex, which is what serializes us against
 * ext2_get_branch() retries.
 */
static void ext2_splice_branch(struct inode *inode,
            long block, Indirect *where, int num, int blks)
{
    int i;
    struct ext2_block_alloc_info *block_i;
    ext2_fsblk_t current_block;

    block_i = EXT2_I(inode)->i_block_alloc_info;

    /* XXX LOCKING probably should have i_meta_lock ?*/
    /* That's it - the branch becomes reachable here. */
    *where->p = where->key;

    /*
     * If we spliced directly into an existing indirect block and
     * mapped more than one data block, fill the following slots with
     * the (contiguous) remaining block numbers.
     */
    if (num == 0 && blks > 1) {
        current_block = le32_to_cpu(where->key) + 1;
        for (i = 1; i < blks; i++)
            *(where->p + i ) = cpu_to_le32(current_block++);
    }

    /*
     * Update the reservation-window hints so the next sequential
     * allocation continues where this one ended.
     */
    if (block_i) {
        block_i->last_alloc_logical_block = block + blks - 1;
        block_i->last_alloc_physical_block =
            le32_to_cpu(where[num].key) + blks - 1;
    }

    /* We are done with atomic stuff, now do the rest of housekeeping */

    /* had we spliced it onto indirect block? */
    if (where->bh)
        mark_buffer_dirty_inode(where->bh, inode);

    inode->i_ctime = CURRENT_TIME_SEC;
    mark_inode_dirty(inode);
}
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
/*
 * ext2_get_blocks - map a logical block range to disk blocks
 * @inode: inode in question
 * @iblock: first logical block
 * @maxblocks: maximum number of blocks the caller wants mapped
 * @bh_result: buffer head the mapping is returned in
 * @create: allocate blocks if the range is not yet mapped
 *
 * Returns the number of contiguous blocks mapped (> 0), 0 when the
 * block is unmapped and @create is false, or a negative error.  Sets
 * BH_New on freshly allocated blocks and BH_Boundary when the last
 * mapped block abuts an indirect-block boundary.
 *
 * Lookups are lockless and validated with verify_chain(); allocation
 * is serialized on ei->truncate_mutex against truncate.  A chain that
 * changes underneath us produces -EAGAIN and a locked re-lookup.
 */
static int ext2_get_blocks(struct inode *inode,
               sector_t iblock, unsigned long maxblocks,
               struct buffer_head *bh_result,
               int create)
{
    int err = -EIO;
    int offsets[4];
    Indirect chain[4];
    Indirect *partial;
    ext2_fsblk_t goal;
    int indirect_blks;
    int blocks_to_boundary = 0;
    int depth;
    struct ext2_inode_info *ei = EXT2_I(inode);
    int count = 0;
    ext2_fsblk_t first_block = 0;

    depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

    if (depth == 0)
        return (err);

    partial = ext2_get_branch(inode, depth, offsets, chain, &err);
    /* Simplest case - block found, no allocation needed */
    if (!partial) {
        first_block = le32_to_cpu(chain[depth - 1].key);
        clear_buffer_new(bh_result); /* What's this do? */
        count++;
        /* map more blocks while they are contiguous on disk */
        while (count < maxblocks && count <= blocks_to_boundary) {
            ext2_fsblk_t blk;

            if (!verify_chain(chain, chain + depth - 1)) {
                /*
                 * Indirect block might be removed by
                 * truncate while we were reading it.
                 * Handling of that case: forget what we've
                 * got now, go to reread.
                 */
                err = -EAGAIN;
                count = 0;
                break;
            }
            blk = le32_to_cpu(*(chain[depth-1].p + count));
            if (blk == first_block + count)
                count++;
            else
                break;
        }
        if (err != -EAGAIN)
            goto got_it;
    }

    /* Next simple case - plain lookup or failed read of indirect block */
    if (!create || err == -EIO)
        goto cleanup;

    mutex_lock(&ei->truncate_mutex);
    /*
     * If the indirect block is missing while we are reading
     * the chain(ext2_get_branch() returns -EAGAIN err), or
     * if the chain has been changed after we grab the semaphore,
     * (either because another process truncated this branch, or
     * another get_block allocated this branch) re-grab the chain to see if
     * the request block has been allocated or not.
     *
     * Since we already block the truncate/other get_block
     * at this point, we will have the current copy of the chain when we
     * splice the branch into the tree.
     */
    if (err == -EAGAIN || !verify_chain(chain, partial)) {
        while (partial > chain) {
            brelse(partial->bh);
            partial--;
        }
        partial = ext2_get_branch(inode, depth, offsets, chain, &err);
        if (!partial) {
            /* someone else mapped it while we waited */
            count++;
            mutex_unlock(&ei->truncate_mutex);
            if (err)
                goto cleanup;
            clear_buffer_new(bh_result);
            goto got_it;
        }
    }

    /*
     * Okay, we need to do block allocation.  Lazily initialize the
     * block-reservation info for regular files.
     */
    if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
        ext2_init_block_alloc_info(inode);

    goal = ext2_find_goal(inode, iblock, partial);

    /* the number of blocks need to allocate for [d,t]indirect blocks */
    indirect_blks = (chain + depth) - partial - 1;
    /*
     * Next look up the indirect map to count the total number of
     * direct blocks to allocate for this branch.
     */
    count = ext2_blks_to_allocate(partial, indirect_blks,
                    maxblocks, blocks_to_boundary);
    /*
     * XXX ???? Block out ext2_truncate while we alter the tree
     */
    err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
                offsets + (partial - chain), partial);

    if (err) {
        mutex_unlock(&ei->truncate_mutex);
        goto cleanup;
    }

    if (ext2_use_xip(inode->i_sb)) {
        /*
         * XIP maps the block into user space directly; it must be
         * zeroed before it becomes visible.
         */
        err = ext2_clear_xip_target (inode,
            le32_to_cpu(chain[depth-1].key));
        if (err) {
            mutex_unlock(&ei->truncate_mutex);
            goto cleanup;
        }
    }

    ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
    mutex_unlock(&ei->truncate_mutex);
    set_buffer_new(bh_result);
got_it:
    map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
    if (count > blocks_to_boundary)
        set_buffer_boundary(bh_result);
    err = count;
    /* Clean up the whole chain before exit */
    partial = chain + depth - 1;
cleanup:
    while (partial > chain) {
        brelse(partial->bh);
        partial--;
    }
    return err;
}
761
762int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
763{
764 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
765 int ret = ext2_get_blocks(inode, iblock, max_blocks,
766 bh_result, create);
767 if (ret > 0) {
768 bh_result->b_size = (ret << inode->i_blkbits);
769 ret = 0;
770 }
771 return ret;
772
773}
774
/* FIEMAP ioctl support: delegate extent enumeration to the generic
 * block-mapping based implementation. */
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        u64 start, u64 len)
{
    return generic_block_fiemap(inode, fieinfo, start, len,
                    ext2_get_block);
}
781
/* Write a single dirty page using the buffer-head based path. */
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
    return block_write_full_page(page, ext2_get_block, wbc);
}
786
/* Read a single page via the generic multi-block read path. */
static int ext2_readpage(struct file *file, struct page *page)
{
    return mpage_readpage(page, ext2_get_block);
}
791
/* Readahead: read a batch of pages with merged bios where possible. */
static int
ext2_readpages(struct file *file, struct address_space *mapping,
        struct list_head *pages, unsigned nr_pages)
{
    return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}
798
799static int
800ext2_write_begin(struct file *file, struct address_space *mapping,
801 loff_t pos, unsigned len, unsigned flags,
802 struct page **pagep, void **fsdata)
803{
804 int ret;
805
806 ret = block_write_begin(mapping, pos, len, flags, pagep,
807 ext2_get_block);
808 if (ret < 0)
809 ext2_write_failed(mapping, pos + len);
810 return ret;
811}
812
813static int ext2_write_end(struct file *file, struct address_space *mapping,
814 loff_t pos, unsigned len, unsigned copied,
815 struct page *page, void *fsdata)
816{
817 int ret;
818
819 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
820 if (ret < len)
821 ext2_write_failed(mapping, pos + len);
822 return ret;
823}
824
825static int
826ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
827 loff_t pos, unsigned len, unsigned flags,
828 struct page **pagep, void **fsdata)
829{
830 int ret;
831
832 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
833 ext2_get_block);
834 if (ret < 0)
835 ext2_write_failed(mapping, pos + len);
836 return ret;
837}
838
/* Writepage for the "nobh" mount option. */
static int ext2_nobh_writepage(struct page *page,
            struct writeback_control *wbc)
{
    return nobh_writepage(page, ext2_get_block, wbc);
}
844
/* ->bmap: logical-to-physical block translation (e.g. for swapfiles). */
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
    return generic_block_bmap(mapping,block,ext2_get_block);
}
849
/*
 * Direct I/O via the generic blockdev path.  If a direct write fails
 * after extending the file, trim the over-allocated blocks back.
 */
static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
            loff_t offset, unsigned long nr_segs)
{
    struct file *file = iocb->ki_filp;
    struct address_space *mapping = file->f_mapping;
    struct inode *inode = mapping->host;
    ssize_t ret;

    ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
                ext2_get_block);
    if (ret < 0 && (rw & WRITE))
        ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
    return ret;
}
865
/* Writeback of a range of dirty pages with bio merging. */
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
    return mpage_writepages(mapping, wbc, ext2_get_block);
}
871
/* Default address-space operations (buffer-head based). */
const struct address_space_operations ext2_aops = {
    .readpage		= ext2_readpage,
    .readpages		= ext2_readpages,
    .writepage		= ext2_writepage,
    .write_begin		= ext2_write_begin,
    .write_end		= ext2_write_end,
    .bmap			= ext2_bmap,
    .direct_IO		= ext2_direct_IO,
    .writepages		= ext2_writepages,
    .migratepage		= buffer_migrate_page,
    .is_partially_uptodate	= block_is_partially_uptodate,
    .error_remove_page	= generic_error_remove_page,
};
885
/* Address-space operations for execute-in-place (xip) mounts. */
const struct address_space_operations ext2_aops_xip = {
    .bmap			= ext2_bmap,
    .get_xip_mem		= ext2_get_xip_mem,
};
890
/* Address-space operations for the "nobh" mount option. */
const struct address_space_operations ext2_nobh_aops = {
    .readpage		= ext2_readpage,
    .readpages		= ext2_readpages,
    .writepage		= ext2_nobh_writepage,
    .write_begin		= ext2_nobh_write_begin,
    .write_end		= nobh_write_end,
    .bmap			= ext2_bmap,
    .direct_IO		= ext2_direct_IO,
    .writepages		= ext2_writepages,
    .migratepage		= buffer_migrate_page,
    .error_remove_page	= generic_error_remove_page,
};
903
904
905
906
907
908
909static inline int all_zeroes(__le32 *p, __le32 *q)
910{
911 while (p < q)
912 if (*p++)
913 return 0;
914 return 1;
915}
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
/**
 * ext2_find_shared - find the indirect blocks for partial truncation
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (from ext2_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the store the pointer to the detached top of branch
 *
 * Locates the last indirect block that will be shared between the part
 * of the tree that survives truncation and the part that goes away.
 * Under i_meta_lock it either detaches the whole doomed subtree
 * (storing its root in *top and zeroing the parent slot) or, if the
 * boundary block itself must survive, steps p->p back so the caller
 * frees only the slots past the boundary.  Returns the deepest
 * still-held chain element; held buffers past it are released.
 */
static Indirect *ext2_find_shared(struct inode *inode,
                int depth,
                int offsets[4],
                Indirect chain[4],
                __le32 *top)
{
    Indirect *partial, *p;
    int k, err;

    *top = 0;
    /* skip trailing zero offsets - those levels die entirely */
    for (k = depth; k > 1 && !offsets[k-1]; k--)
        ;
    partial = ext2_get_branch(inode, k, offsets, chain, &err);
    if (!partial)
        partial = chain + k-1;
    /*
     * If the branch acquired continuation since we've looked at it -
     * fine, it should all survive and (new) top doesn't belong to us.
     */
    write_lock(&EXT2_I(inode)->i_meta_lock);
    if (!partial->key && *partial->p) {
        write_unlock(&EXT2_I(inode)->i_meta_lock);
        goto no_top;
    }
    for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
        ;
    /*
     * OK, we've found the last block that must survive. The rest of our
     * branch should be detached before unlocking. However, if that rest
     * of branch is all ours and does not grow immediately from the inode
     * it's easier to cheat and just decrement partial->p.
     */
    if (p == chain + k - 1 && p > chain) {
        p->p--;
    } else {
        *top = *p->p;
        *p->p = 0;
    }
    write_unlock(&EXT2_I(inode)->i_meta_lock);

    /* release buffers of the levels we are not going to touch */
    while(partial > p)
    {
        brelse(partial->bh);
        partial--;
    }
no_top:
    return partial;
}
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
/**
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers (slots are zeroed as we go)
 * @q: points immediately past the end of array
 *
 * Frees the blocks numbered in [p, q), coalescing runs of adjacent
 * block numbers into single ext2_free_blocks() calls.  Note the goto
 * into the else branch: "start a new run" is shared between the
 * first block and a run break.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
    unsigned long block_to_free = 0, count = 0;
    unsigned long nr;

    for ( ; p < q ; p++) {
        nr = le32_to_cpu(*p);
        if (nr) {
            *p = 0;
            /* accumulate blocks to free if they're contiguous */
            if (count == 0)
                goto free_this;
            else if (block_to_free == nr - count)
                count++;
            else {
                ext2_free_blocks (inode, block_to_free, count);
                mark_inode_dirty(inode);
            free_this:
                block_to_free = nr;
                count = 1;
            }
        }
    }
    /* flush the final run, if any */
    if (count > 0) {
        ext2_free_blocks (inode, block_to_free, count);
        mark_inode_dirty(inode);
    }
}
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
/**
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers (slots are zeroed as we go)
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * Recursively frees the subtrees rooted at the blocks in [p, q):
 * depth > 0 means the blocks are indirect blocks whose contents are
 * freed first; depth == 0 means plain data blocks.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
    struct buffer_head * bh;
    unsigned long nr;

    if (depth--) {
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        for ( ; p < q ; p++) {
            nr = le32_to_cpu(*p);
            if (!nr)
                continue;
            *p = 0;
            bh = sb_bread(inode->i_sb, nr);
            /*
             * A read failure? Report error and clear slot
             * (should be rare).
             */
            if (!bh) {
                ext2_error(inode->i_sb, "ext2_free_branches",
                    "Read failure, inode=%ld, block=%ld",
                    inode->i_ino, nr);
                continue;
            }
            ext2_free_branches(inode,
                       (__le32*)bh->b_data,
                       (__le32*)bh->b_data + addr_per_block,
                       depth);
            /* the indirect block is dead - don't write it back */
            bforget(bh);
            ext2_free_blocks(inode, nr, 1);
            mark_inode_dirty(inode);
        }
    } else
        ext2_free_data(inode, p, q);
}
1084
/*
 * __ext2_truncate_blocks - free all blocks of the inode past @offset.
 * Serialized against block allocation by ei->truncate_mutex.  First the
 * partially-shared branch containing the new end-of-file is trimmed
 * (via ext2_find_shared), then every wholly doomed branch, then any
 * remaining top-level indirect trees (note the deliberate switch
 * fall-through: each case also frees all deeper trees).
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
    __le32 *i_data = EXT2_I(inode)->i_data;
    struct ext2_inode_info *ei = EXT2_I(inode);
    int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
    int offsets[4];
    Indirect chain[4];
    Indirect *partial;
    __le32 nr = 0;
    int n;
    long iblock;
    unsigned blocksize;
    blocksize = inode->i_sb->s_blocksize;
    /* first block that must be freed (round offset up to a block) */
    iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

    n = ext2_block_to_path(inode, iblock, offsets, NULL);
    if (n == 0)
        return;

    /*
     * From here we block out all ext2_get_block() callers who want to
     * modify the block allocation tree.
     */
    mutex_lock(&ei->truncate_mutex);

    if (n == 1) {
        /* truncation point is in the direct blocks */
        ext2_free_data(inode, i_data+offsets[0],
                    i_data + EXT2_NDIR_BLOCKS);
        goto do_indirects;
    }

    partial = ext2_find_shared(inode, n, offsets, chain, &nr);
    /* Kill the top of shared branch (already detached) */
    if (nr) {
        if (partial == chain)
            mark_inode_dirty(inode);
        else
            mark_buffer_dirty_inode(partial->bh, inode);
        ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
    }
    /* Clear the ends of indirect blocks on the shared branch */
    while (partial > chain) {
        ext2_free_branches(inode,
                   partial->p + 1,
                   (__le32*)partial->bh->b_data+addr_per_block,
                   (chain+n-1) - partial);
        mark_buffer_dirty_inode(partial->bh, inode);
        brelse (partial->bh);
        partial--;
    }
do_indirects:
    /* Kill the remaining (whole) subtrees */
    switch (offsets[0]) {
        default:
            nr = i_data[EXT2_IND_BLOCK];
            if (nr) {
                i_data[EXT2_IND_BLOCK] = 0;
                mark_inode_dirty(inode);
                ext2_free_branches(inode, &nr, &nr+1, 1);
            }
            /* fall through */
        case EXT2_IND_BLOCK:
            nr = i_data[EXT2_DIND_BLOCK];
            if (nr) {
                i_data[EXT2_DIND_BLOCK] = 0;
                mark_inode_dirty(inode);
                ext2_free_branches(inode, &nr, &nr+1, 2);
            }
            /* fall through */
        case EXT2_DIND_BLOCK:
            nr = i_data[EXT2_TIND_BLOCK];
            if (nr) {
                i_data[EXT2_TIND_BLOCK] = 0;
                mark_inode_dirty(inode);
                ext2_free_branches(inode, &nr, &nr+1, 3);
            }
            /* fall through */
        case EXT2_TIND_BLOCK:
            ;
    }

    ext2_discard_reservation(inode);

    mutex_unlock(&ei->truncate_mutex);
}
1167
1168static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1169{
1170
1171
1172
1173
1174
1175
1176
1177
1178 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1179 S_ISLNK(inode->i_mode)))
1180 return;
1181 if (ext2_inode_is_fast_symlink(inode))
1182 return;
1183 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1184 return;
1185 __ext2_truncate_blocks(inode, offset);
1186}
1187
/*
 * ext2_setsize - change the file size to @newsize (truncate or extend).
 * Zeroes the partial tail block (via the appropriate xip/nobh/blockdev
 * helper), updates i_size and frees blocks past the new end, then
 * stamps ctime/mtime and syncs if the inode requires it.
 * Returns 0 or a negative error.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
    int error;

    if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
        S_ISLNK(inode->i_mode)))
        return -EINVAL;
    if (ext2_inode_is_fast_symlink(inode))
        return -EINVAL;
    if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
        return -EPERM;

    /* wait for outstanding direct I/O before changing the size */
    inode_dio_wait(inode);

    if (mapping_is_xip(inode->i_mapping))
        error = xip_truncate_page(inode->i_mapping, newsize);
    else if (test_opt(inode->i_sb, NOBH))
        error = nobh_truncate_page(inode->i_mapping,
                newsize, ext2_get_block);
    else
        error = block_truncate_page(inode->i_mapping,
                newsize, ext2_get_block);
    if (error)
        return error;

    truncate_setsize(inode, newsize);
    __ext2_truncate_blocks(inode, newsize);

    inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
    if (inode_needs_sync(inode)) {
        sync_mapping_buffers(inode->i_mapping);
        sync_inode_metadata(inode, 1);
    } else {
        mark_inode_dirty(inode);
    }

    return 0;
}
1226
/*
 * ext2_get_inode - read the raw on-disk inode @ino.
 * Validates the inode number, locates it via the group descriptor's
 * inode table and reads the containing block.  On success returns a
 * pointer into the buffer and stores the held buffer head in *p (the
 * caller must brelse it); on failure returns ERR_PTR(-EINVAL/-EIO)
 * with *p left NULL.
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
                    struct buffer_head **p)
{
    struct buffer_head * bh;
    unsigned long block_group;
    unsigned long block;
    unsigned long offset;
    struct ext2_group_desc * gdp;

    *p = NULL;
    if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
        ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
        goto Einval;

    block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
    gdp = ext2_get_group_desc(sb, block_group, NULL);
    if (!gdp)
        goto Egdp;
    /*
     * Figure out the offset within the block group inode table
     */
    offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
    block = le32_to_cpu(gdp->bg_inode_table) +
        (offset >> EXT2_BLOCK_SIZE_BITS(sb));
    if (!(bh = sb_bread(sb, block)))
        goto Eio;

    *p = bh;
    /* offset of the inode within the block it lives in */
    offset &= (EXT2_BLOCK_SIZE(sb) - 1);
    return (struct ext2_inode *) (bh->b_data + offset);

Einval:
    ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
           (unsigned long) ino);
    return ERR_PTR(-EINVAL);
Eio:
    ext2_error(sb, "ext2_get_inode",
           "unable to read inode block - inode=%lu, block=%lu",
           (unsigned long) ino, block);
Egdp:
    return ERR_PTR(-EIO);
}
1269
1270void ext2_set_inode_flags(struct inode *inode)
1271{
1272 unsigned int flags = EXT2_I(inode)->i_flags;
1273
1274 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
1275 if (flags & EXT2_SYNC_FL)
1276 inode->i_flags |= S_SYNC;
1277 if (flags & EXT2_APPEND_FL)
1278 inode->i_flags |= S_APPEND;
1279 if (flags & EXT2_IMMUTABLE_FL)
1280 inode->i_flags |= S_IMMUTABLE;
1281 if (flags & EXT2_NOATIME_FL)
1282 inode->i_flags |= S_NOATIME;
1283 if (flags & EXT2_DIRSYNC_FL)
1284 inode->i_flags |= S_DIRSYNC;
1285}
1286
1287
1288void ext2_get_inode_flags(struct ext2_inode_info *ei)
1289{
1290 unsigned int flags = ei->vfs_inode.i_flags;
1291
1292 ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
1293 EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
1294 if (flags & S_SYNC)
1295 ei->i_flags |= EXT2_SYNC_FL;
1296 if (flags & S_APPEND)
1297 ei->i_flags |= EXT2_APPEND_FL;
1298 if (flags & S_IMMUTABLE)
1299 ei->i_flags |= EXT2_IMMUTABLE_FL;
1300 if (flags & S_NOATIME)
1301 ei->i_flags |= EXT2_NOATIME_FL;
1302 if (flags & S_DIRSYNC)
1303 ei->i_flags |= EXT2_DIRSYNC_FL;
1304}
1305
/*
 * ext2_iget - obtain the in-core inode for @ino.
 * Returns a cached inode if present; otherwise reads the on-disk inode,
 * fills in the VFS fields, wires up the per-type inode/file/aops
 * operations and unlocks the new inode.  Returns ERR_PTR on failure
 * (-ESTALE for deleted inodes, -EIO/-EINVAL for read/validity errors).
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
    struct ext2_inode_info *ei;
    struct buffer_head * bh;
    struct ext2_inode *raw_inode;
    struct inode *inode;
    long ret = -EIO;
    int n;
    uid_t i_uid;
    gid_t i_gid;

    inode = iget_locked(sb, ino);
    if (!inode)
        return ERR_PTR(-ENOMEM);
    if (!(inode->i_state & I_NEW))
        return inode;

    ei = EXT2_I(inode);
    ei->i_block_alloc_info = NULL;

    raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
    if (IS_ERR(raw_inode)) {
        ret = PTR_ERR(raw_inode);
        goto bad_inode;
    }

    inode->i_mode = le16_to_cpu(raw_inode->i_mode);
    i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
    i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
    if (!(test_opt (inode->i_sb, NO_UID32))) {
        /* 32-bit uids: combine low and high halves */
        i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
        i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
    }
    i_uid_write(inode, i_uid);
    i_gid_write(inode, i_gid);
    set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
    inode->i_size = le32_to_cpu(raw_inode->i_size);
    inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
    inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
    inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
    inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
    ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
    /* We now have enough fields to check if the inode was active or not.
     * This is needed because nfsd might try to access dead inodes
     * the test is that same one that e2fsck uses
     * NeilBrown 1999oct15
     */
    if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
        /* this inode is deleted */
        brelse (bh);
        ret = -ESTALE;
        goto bad_inode;
    }
    inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
    ei->i_flags = le32_to_cpu(raw_inode->i_flags);
    ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
    ei->i_frag_no = raw_inode->i_frag;
    ei->i_frag_size = raw_inode->i_fsize;
    ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
    ei->i_dir_acl = 0;
    /* the dir_acl field doubles as the high 32 bits of i_size for
     * regular files (large file support) */
    if (S_ISREG(inode->i_mode))
        inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
    else
        ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
    ei->i_dtime = 0;
    inode->i_generation = le32_to_cpu(raw_inode->i_generation);
    ei->i_state = 0;
    ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
    ei->i_dir_start_lookup = 0;

    /*
     * NOTE! The in-memory inode i_data array is in little-endian order
     * even on big-endian machines: we do NOT byteswap the block numbers!
     */
    for (n = 0; n < EXT2_N_BLOCKS; n++)
        ei->i_data[n] = raw_inode->i_block[n];

    if (S_ISREG(inode->i_mode)) {
        inode->i_op = &ext2_file_inode_operations;
        if (ext2_use_xip(inode->i_sb)) {
            inode->i_mapping->a_ops = &ext2_aops_xip;
            inode->i_fop = &ext2_xip_file_operations;
        } else if (test_opt(inode->i_sb, NOBH)) {
            inode->i_mapping->a_ops = &ext2_nobh_aops;
            inode->i_fop = &ext2_file_operations;
        } else {
            inode->i_mapping->a_ops = &ext2_aops;
            inode->i_fop = &ext2_file_operations;
        }
    } else if (S_ISDIR(inode->i_mode)) {
        inode->i_op = &ext2_dir_inode_operations;
        inode->i_fop = &ext2_dir_operations;
        if (test_opt(inode->i_sb, NOBH))
            inode->i_mapping->a_ops = &ext2_nobh_aops;
        else
            inode->i_mapping->a_ops = &ext2_aops;
    } else if (S_ISLNK(inode->i_mode)) {
        if (ext2_inode_is_fast_symlink(inode)) {
            inode->i_op = &ext2_fast_symlink_inode_operations;
            /* target lives in i_data; make sure it's terminated */
            nd_terminate_link(ei->i_data, inode->i_size,
                sizeof(ei->i_data) - 1);
        } else {
            inode->i_op = &ext2_symlink_inode_operations;
            if (test_opt(inode->i_sb, NOBH))
                inode->i_mapping->a_ops = &ext2_nobh_aops;
            else
                inode->i_mapping->a_ops = &ext2_aops;
        }
    } else {
        /* device node / fifo / socket: dev number is stored in
         * i_block[0] (old small encoding) or i_block[1] (new) */
        inode->i_op = &ext2_special_inode_operations;
        if (raw_inode->i_block[0])
            init_special_inode(inode, inode->i_mode,
               old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
        else
            init_special_inode(inode, inode->i_mode,
               new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
    }
    brelse (bh);
    ext2_set_inode_flags(inode);
    unlock_new_inode(inode);
    return inode;

bad_inode:
    iget_failed(inode);
    return ERR_PTR(ret);
}
1432
/*
 * __ext2_write_inode - copy the in-core inode into its on-disk slot
 * and mark the containing buffer dirty; if @do_sync, also write the
 * buffer synchronously and report I/O errors.  Returns 0 or -EIO.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
    struct ext2_inode_info *ei = EXT2_I(inode);
    struct super_block *sb = inode->i_sb;
    ino_t ino = inode->i_ino;
    uid_t uid = i_uid_read(inode);
    gid_t gid = i_gid_read(inode);
    struct buffer_head * bh;
    struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
    int n;
    int err = 0;

    if (IS_ERR(raw_inode))
        return -EIO;

    /* For fields not tracking in the in-memory inode,
     * initialise them to zero for new inodes. */
    if (ei->i_state & EXT2_STATE_NEW)
        memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

    ext2_get_inode_flags(ei);
    raw_inode->i_mode = cpu_to_le16(inode->i_mode);
    if (!(test_opt(sb, NO_UID32))) {
        raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
        raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
        /*
         * Fix up interoperability with old kernels. Otherwise, old inodes get
         * re-used with the upper 16 bits of the uid/gid intact
         */
        if (!ei->i_dtime) {
            raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
            raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
        } else {
            raw_inode->i_uid_high = 0;
            raw_inode->i_gid_high = 0;
        }
    } else {
        raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
        raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
        raw_inode->i_uid_high = 0;
        raw_inode->i_gid_high = 0;
    }
    raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
    raw_inode->i_size = cpu_to_le32(inode->i_size);
    raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
    raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
    raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

    raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
    raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
    raw_inode->i_flags = cpu_to_le32(ei->i_flags);
    raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
    raw_inode->i_frag = ei->i_frag_no;
    raw_inode->i_fsize = ei->i_frag_size;
    raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
    if (!S_ISREG(inode->i_mode))
        raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
    else {
        /* i_dir_acl doubles as i_size_high for regular files */
        raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
        if (inode->i_size > 0x7fffffffULL) {
            if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
                    EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
                EXT2_SB(sb)->s_es->s_rev_level ==
                    cpu_to_le32(EXT2_GOOD_OLD_REV)) {
                /* If this is the first large file
                 * created, add a flag to the superblock.
                 */
                spin_lock(&EXT2_SB(sb)->s_lock);
                ext2_update_dynamic_rev(sb);
                EXT2_SET_RO_COMPAT_FEATURE(sb,
                    EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
                spin_unlock(&EXT2_SB(sb)->s_lock);
                ext2_write_super(sb);
            }
        }
    }

    raw_inode->i_generation = cpu_to_le32(inode->i_generation);
    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
        /* device number goes into i_block[0] (old encoding) or
         * i_block[1] (new encoding), the other slot stays zero */
        if (old_valid_dev(inode->i_rdev)) {
            raw_inode->i_block[0] =
                cpu_to_le32(old_encode_dev(inode->i_rdev));
            raw_inode->i_block[1] = 0;
        } else {
            raw_inode->i_block[0] = 0;
            raw_inode->i_block[1] =
                cpu_to_le32(new_encode_dev(inode->i_rdev));
            raw_inode->i_block[2] = 0;
        }
    } else for (n = 0; n < EXT2_N_BLOCKS; n++)
        raw_inode->i_block[n] = ei->i_data[n];
    mark_buffer_dirty(bh);
    if (do_sync) {
        sync_dirty_buffer(bh);
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
            printk ("IO error syncing ext2 inode [%s:%08lx]\n",
                sb->s_id, (unsigned long) ino);
            err = -EIO;
        }
    }
    ei->i_state &= ~EXT2_STATE_NEW;
    brelse (bh);
    return err;
}
1537
/* VFS ->write_inode: synchronous only for WB_SYNC_ALL writeback. */
int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
    return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
1542
/*
 * ext2_setattr - VFS ->setattr: validate the attribute change, move
 * quota on uid/gid changes, resize on ATTR_SIZE, copy the remaining
 * attributes into the inode and update the ACL on mode changes.
 * Returns 0 or a negative error.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
    struct inode *inode = dentry->d_inode;
    int error;

    error = inode_change_ok(inode, iattr);
    if (error)
        return error;

    if (is_quota_modification(inode, iattr))
        dquot_initialize(inode);
    /* transfer quota charges when the owner changes */
    if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
        (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
        error = dquot_transfer(inode, iattr);
        if (error)
            return error;
    }
    if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
        error = ext2_setsize(inode, iattr->ia_size);
        if (error)
            return error;
    }
    setattr_copy(inode, iattr);
    if (iattr->ia_valid & ATTR_MODE)
        error = ext2_acl_chmod(inode);
    mark_inode_dirty(inode);

    return error;
}
1572