1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/time.h>
26#include <linux/highuid.h>
27#include <linux/pagemap.h>
28#include <linux/quotaops.h>
29#include <linux/writeback.h>
30#include <linux/buffer_head.h>
31#include <linux/mpage.h>
32#include <linux/fiemap.h>
33#include <linux/namei.h>
34#include <linux/aio.h>
35#include "ext2.h"
36#include "acl.h"
37#include "xip.h"
38#include "xattr.h"
39
40static int __ext2_write_inode(struct inode *inode, int do_sync);
41
42
43
44
45static inline int ext2_inode_is_fast_symlink(struct inode *inode)
46{
47 int ea_blocks = EXT2_I(inode)->i_file_acl ?
48 (inode->i_sb->s_blocksize >> 9) : 0;
49
50 return (S_ISLNK(inode->i_mode) &&
51 inode->i_blocks - ea_blocks == 0);
52}
53
54static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
55
56static void ext2_write_failed(struct address_space *mapping, loff_t to)
57{
58 struct inode *inode = mapping->host;
59
60 if (to > inode->i_size) {
61 truncate_pagecache(inode, inode->i_size);
62 ext2_truncate_blocks(inode, inode->i_size);
63 }
64}
65
66
67
68
/*
 * Called by the VFS when the last reference to an in-core inode is dropped.
 * If the inode has been unlinked (i_nlink == 0) and is not marked bad, its
 * data blocks, extended attributes and on-disk inode are all released;
 * otherwise only in-memory state (page cache, quota references, block
 * reservation info) is torn down.
 */
void ext2_evict_inode(struct inode * inode)
{
	struct ext2_block_alloc_info *rsv;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		/* Quota must be set up before freeing blocks below. */
		dquot_initialize(inode);
	} else {
		dquot_drop(inode);
	}

	truncate_inode_pages_final(&inode->i_data);

	if (want_delete) {
		/* Keep the fs from being frozen while we delete the inode. */
		sb_start_intwrite(inode->i_sb);
		/* Record deletion time and push the inode to disk first. */
		EXT2_I(inode)->i_dtime = get_seconds();
		mark_inode_dirty(inode);
		__ext2_write_inode(inode, inode_needs_sync(inode));
		/* Truncate to zero length, releasing all data blocks. */
		inode->i_size = 0;
		if (inode->i_blocks)
			ext2_truncate_blocks(inode, 0);
		ext2_xattr_delete_inode(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	/* Drop the reservation window and free its bookkeeping structure. */
	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (want_delete) {
		/* Finally give the inode number itself back to the bitmap. */
		ext2_free_inode(inode);
		sb_end_intwrite(inode->i_sb);
	}
}
110
/*
 * One step in a chain of indirect-block lookups.
 * @p:   address of the block-number slot (in the inode's i_data or inside
 *       an indirect block's buffer)
 * @key: copy of *p taken when the chain was built; used by verify_chain()
 *       to detect concurrent truncation
 * @bh:  buffer holding the indirect block that contains @p, or NULL for
 *       the step rooted in the inode itself
 */
typedef struct {
	__le32 *p;
	__le32 key;
	struct buffer_head *bh;
} Indirect;
116
117static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
118{
119 p->key = *(p->p = v);
120 p->bh = bh;
121}
122
123static inline int verify_chain(Indirect *from, Indirect *to)
124{
125 while (from <= to && from->key == *from->p)
126 from++;
127 return (from > to);
128}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/**
 * ext2_block_to_path - translate a logical block number into a tree path
 * @inode: inode in question (only its superblock geometry is used)
 * @i_block: logical block number to be parsed
 * @offsets: array receiving the per-level offsets (up to 4 entries)
 * @boundary: if non-NULL, receives the number of further blocks that fit
 *            before the next indirect-block boundary
 *
 * ext2 maps file data through a tree anchored in the inode: 12 direct
 * slots followed by single, double and triple indirect blocks.  The
 * return value is the depth of the path (0 on invalid block numbers),
 * and offsets[k] is the slot index to follow at level k.
 *
 * Note: the "else if" conditions deliberately modify i_block in place,
 * rebasing it at each level of the tree.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		/* Direct block: single-step path. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect. */
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect. */
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect. */
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));

	return n;
}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/**
 * ext2_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 = direct pointer, 2 = indirect, ...)
 * @offsets: per-level offsets, as produced by ext2_block_to_path()
 * @chain: place to store the result (up to 4 entries)
 * @err: where to store an error code
 *
 * Returns NULL when the whole chain resolved (the block number is then
 * chain[depth-1].key); otherwise returns a pointer to the last filled
 * chain element, whose ->key is zero at the point where the lookup
 * stopped.  *err is 0, -EIO on read failure, or -EAGAIN if a concurrent
 * truncate changed the chain under us (detected via i_meta_lock +
 * verify_chain()).  Caller must brelse() the buffers in chain[1..] up to
 * and including the returned element.
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* Level 0 is rooted in the inode itself, so no buffer is needed. */
	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Re-check the chain under the lock before extending it. */
		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&EXT2_I(inode)->i_meta_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
/**
 * ext2_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: the chain element at which the lookup stopped
 *
 * Returns a preferred physical block for a new allocation, trying in
 * order: the nearest preceding non-empty pointer in the same (indirect
 * or inode) block, the indirect block itself, and finally a position in
 * the inode's block group perturbed by a per-process "colour" so that
 * concurrent writers spread out.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext2_fsblk_t bg_start;
	ext2_fsblk_t colour;

	/* Try to find a previously allocated block to the left of us. */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such block: place data next to its indirect block. */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Fall back to the start of the inode's block group, offset by a
	 * pid-derived colour (a 16th of the group per colour slot).
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
317
318
319
320
321
322
323
324
325
326
327static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
328 Indirect *partial)
329{
330 struct ext2_block_alloc_info *block_i;
331
332 block_i = EXT2_I(inode)->i_block_alloc_info;
333
334
335
336
337
338 if (block_i && (block == block_i->last_alloc_logical_block + 1)
339 && (block_i->last_alloc_physical_block != 0)) {
340 return block_i->last_alloc_physical_block + 1;
341 }
342
343 return ext2_find_near(inode, partial);
344}
345
346
347
348
349
350
351
352
353
354
355
356
357
/**
 * ext2_blks_to_allocate - how many direct blocks to allocate in one go
 * @branch: chain of indirect blocks (branch[0] is the deepest element)
 * @k: number of indirect blocks that still need allocating
 * @blks: number of data blocks the caller would like mapped
 * @blocks_to_boundary: blocks remaining before an indirect boundary
 *
 * Returns the number of direct blocks worth allocating, capped at the
 * indirect-block boundary.  When existing indirect blocks are in place
 * (k == 0), the count is further limited to the run of still-empty
 * pointer slots following branch[0].p.
 */
static int
ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * A new indirect block is needed: we can allocate the full
	 * requested range up to the boundary, since the indirect block
	 * will cover all of it.
	 */
	if (k > 0) {
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/* Existing indirect block: stop at the first non-empty slot. */
	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
384
385
386
387
388
389
390
391
392
393
394
/**
 * ext2_alloc_blocks - allocate indirect blocks plus a run of data blocks
 * @inode: owner
 * @goal: preferred physical block for the allocation
 * @indirect_blks: number of missing indirect blocks to allocate
 * @blks: desired number of contiguous data blocks
 * @new_blocks: output; indirect block numbers in [0..indirect_blks-1],
 *              first data block in [indirect_blks]
 * @err: where to store an error code
 *
 * Repeatedly calls ext2_new_blocks() until all indirect blocks plus at
 * least one data block have been obtained.  Returns the number of data
 * blocks actually allocated (they are physically contiguous starting at
 * new_blocks[indirect_blks]), which may be fewer than @blks.  On failure,
 * any indirect blocks already obtained are freed again.
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * We try to allocate indirect blocks and the data blocks in a
	 * single ext2_new_blocks() call; if that comes back short, loop
	 * until at least the metadata plus one data block are covered.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* Allocating blocks for indirect blocks and direct blocks. */
		current_block = ext2_new_blocks(inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* Hand out blocks to the pending indirect slots first. */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Blocks left over after metadata are the data blocks. */
		if (count > 0)
			break;
	}

	/* First contiguous data block. */
	new_blocks[index] = current_block;

	/* Total number of data blocks allocated. */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* Undo: give back any indirect blocks we already claimed. */
	for (i = 0; i <index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
/**
 * ext2_alloc_branch - allocate and set up a branch of the block tree
 * @inode: owner
 * @indirect_blks: number of missing indirect blocks along the path
 * @blks: in: desired data blocks; out: data blocks actually allocated
 * @goal: preferred physical block for the allocation
 * @offsets: per-level slot offsets for the new branch
 * @branch: chain elements to fill in (branch[0] gets only its key)
 *
 * Allocates the blocks, links the new indirect blocks together on disk
 * (each initialized to zero with its child pointer(s) set), and marks
 * them dirty.  The branch is complete but NOT yet spliced into the
 *tree — that is ext2_splice_branch()'s job, so a crash in between
 * leaks blocks at worst rather than corrupting the file.  Returns 0 on
 * success; on failure all allocated blocks and buffers are released.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * Set up the metadata: each new indirect block is zeroed and gets
	 * the number of the next block in the branch written into it.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get a buffer for the new indirect block; it is fresh, so
		 * no read is needed — just zero it under the buffer lock.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * The deepest indirect block also records the rest
			 * of the contiguous data-block run.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/*
		 * Directory metadata must hit disk synchronously when the
		 * directory is marked dirsync.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	/* Forget the buffers we dirtied, then free every allocated block. */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}
542
543
544
545
546
547
548
549
550
551
552
553
554
/**
 * ext2_splice_branch - splice an allocated branch into the inode's tree
 * @inode: owner
 * @block: logical number of the first new data block
 * @where: chain built by ext2_alloc_branch(); where->p is the slot in
 *         the existing tree that the new branch hangs off
 * @num: number of newly allocated indirect blocks
 * @blks: number of newly allocated, contiguous data blocks
 *
 * Writes the new branch's root key into the existing tree (the single
 * store that makes the whole branch visible), fills in any additional
 * direct pointers when no indirect block was needed, updates the
 * reservation-window hints, and dirties the affected buffer/inode.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* The store below links the branch into the tree. */
	*where->p = where->key;

	/*
	 * No new indirect block: the remaining direct pointers live in
	 * the same (pre-existing) block as where->p and are filled here.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Record the last allocated logical/physical pair so the next
	 * sequential allocation can continue the run (see ext2_find_goal).
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping. */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
/**
 * ext2_get_blocks - map a logical block range to physical blocks
 * @inode: inode in question
 * @iblock: first logical block to map
 * @maxblocks: maximum number of contiguous blocks to map
 * @bh_result: buffer head receiving the mapping (and boundary/new flags)
 * @create: if set, allocate blocks that are not yet mapped
 *
 * Returns the number of blocks mapped (> 0), or a negative error.  The
 * lookup is done locklessly first; only when allocation is required is
 * truncate_mutex taken, and the chain is then re-verified (it may have
 * been changed by a concurrent truncate or a racing allocator, signalled
 * by -EAGAIN from ext2_get_branch()/verify_chain()).
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		return (err);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed. */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result); /* What's this do? */
		count++;
		/* Map more blocks while they are physically contiguous. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block. */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* The number of blocks needed to complete the path to data block. */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ???? Block out ext2_truncate while we alter the tree
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	if (err) {
		mutex_unlock(&ei->truncate_mutex);
		goto cleanup;
	}

	if (ext2_use_xip(inode->i_sb)) {
		/*
		 * XIP must zero the newly allocated block before it is
		 * exposed, since it is mapped directly to user space.
		 */
		err = ext2_clear_xip_target (inode,
			le32_to_cpu(chain[depth-1].key));
		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit. */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	return err;
}
763
764int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
765{
766 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
767 int ret = ext2_get_blocks(inode, iblock, max_blocks,
768 bh_result, create);
769 if (ret > 0) {
770 bh_result->b_size = (ret << inode->i_blkbits);
771 ret = 0;
772 }
773 return ret;
774
775}
776
777int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
778 u64 start, u64 len)
779{
780 return generic_block_fiemap(inode, fieinfo, start, len,
781 ext2_get_block);
782}
783
784static int ext2_writepage(struct page *page, struct writeback_control *wbc)
785{
786 return block_write_full_page(page, ext2_get_block, wbc);
787}
788
789static int ext2_readpage(struct file *file, struct page *page)
790{
791 return mpage_readpage(page, ext2_get_block);
792}
793
794static int
795ext2_readpages(struct file *file, struct address_space *mapping,
796 struct list_head *pages, unsigned nr_pages)
797{
798 return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
799}
800
801static int
802ext2_write_begin(struct file *file, struct address_space *mapping,
803 loff_t pos, unsigned len, unsigned flags,
804 struct page **pagep, void **fsdata)
805{
806 int ret;
807
808 ret = block_write_begin(mapping, pos, len, flags, pagep,
809 ext2_get_block);
810 if (ret < 0)
811 ext2_write_failed(mapping, pos + len);
812 return ret;
813}
814
815static int ext2_write_end(struct file *file, struct address_space *mapping,
816 loff_t pos, unsigned len, unsigned copied,
817 struct page *page, void *fsdata)
818{
819 int ret;
820
821 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
822 if (ret < len)
823 ext2_write_failed(mapping, pos + len);
824 return ret;
825}
826
827static int
828ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
829 loff_t pos, unsigned len, unsigned flags,
830 struct page **pagep, void **fsdata)
831{
832 int ret;
833
834 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
835 ext2_get_block);
836 if (ret < 0)
837 ext2_write_failed(mapping, pos + len);
838 return ret;
839}
840
841static int ext2_nobh_writepage(struct page *page,
842 struct writeback_control *wbc)
843{
844 return nobh_writepage(page, ext2_get_block, wbc);
845}
846
847static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
848{
849 return generic_block_bmap(mapping,block,ext2_get_block);
850}
851
852static ssize_t
853ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
854 loff_t offset)
855{
856 struct file *file = iocb->ki_filp;
857 struct address_space *mapping = file->f_mapping;
858 struct inode *inode = mapping->host;
859 size_t count = iov_iter_count(iter);
860 ssize_t ret;
861
862 ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
863 if (ret < 0 && (rw & WRITE))
864 ext2_write_failed(mapping, offset + count);
865 return ret;
866}
867
868static int
869ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
870{
871 return mpage_writepages(mapping, wbc, ext2_get_block);
872}
873
/* Default address_space operations (buffered I/O through buffer_heads). */
const struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.write_begin		= ext2_write_begin,
	.write_end		= ext2_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
887
/* address_space operations for execute-in-place (XIP) mounts. */
const struct address_space_operations ext2_aops_xip = {
	.bmap			= ext2_bmap,
	.get_xip_mem		= ext2_get_xip_mem,
};
892
/* address_space operations for the "nobh" mount option. */
const struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_nobh_writepage,
	.write_begin		= ext2_nobh_write_begin,
	.write_end		= nobh_write_end,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
	.migratepage		= buffer_migrate_page,
	.error_remove_page	= generic_error_remove_page,
};
905
906
907
908
909
910
911static inline int all_zeroes(__le32 *p, __le32 *q)
912{
913 while (p < q)
914 if (*p++)
915 return 0;
916 return 1;
917}
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
/**
 * ext2_find_shared - find the indirect blocks for partial truncation
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: path offsets of the first block to be truncated
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the pointer to the topmost removable branch
 *
 * Walks to the boundary of the truncated region, detaches the topmost
 * branch that will be entirely freed (its key is written to *top and
 * zeroed in place, under i_meta_lock), and returns the deepest chain
 * element that survives truncation.  Buffers between the return value
 * and the original chain tail are released here.
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Skip trailing zero offsets: the branch boundary is at level k. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we looked at it,
	 * it's a Zlatko's paradox: fine, it can't be removed.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* Walk up while the slots before ours in each block are all empty. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * If the whole chain above is removable we can't cut the root slot
	 * out yet (the inode's own array must keep the entry until the
	 * caller zeroes it); otherwise detach the top of the doomed branch
	 * now, under the lock, so readers never see a half-freed subtree.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	/* Release buffers below the surviving level. */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
/**
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers (slots are zeroed as they are freed)
 * @q: points immediately past the end of array
 *
 * Coalesces runs of physically contiguous blocks so that
 * ext2_free_blocks() is called once per extent rather than once per
 * block.  Note the goto into the else-branch: it (re)starts a run when
 * none is accumulating or when contiguity breaks.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* Accumulate blocks to free if they're contiguous. */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				ext2_free_blocks (inode, block_to_free, count);
				mark_inode_dirty(inode);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	/* Flush the final run, if any. */
	if (count > 0) {
		ext2_free_blocks (inode, block_to_free, count);
		mark_inode_dirty(inode);
	}
}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
/**
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers (slots are zeroed as branches are freed)
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free (0 means @p holds data blocks)
 *
 * Recursively frees whole subtrees: each indirect block is read, its
 * children freed at depth-1, then the block itself is forgotten and
 * released.  At depth 0 the work degenerates to ext2_free_data().
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure is pointless to retry; report it
			 * and carry on freeing the remaining branches.
			 */
			if (!bh) {
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);
				continue;
			}
			ext2_free_branches(inode,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);
			bforget(bh);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);
		}
	} else
		ext2_free_data(inode, p, q);
}
1086
/*
 * Release all blocks mapped beyond @offset.  The first kept block is the
 * one containing offset-1 (offset is rounded up to a block boundary);
 * everything after it in the direct array and the indirect trees is
 * freed.  truncate_mutex serializes against block allocation.
 */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long iblock;
	unsigned blocksize;
	blocksize = inode->i_sb->s_blocksize;
	/* First wholly-truncated block (round offset up). */
	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	if (n == 0)
		return;

	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {
		/* Boundary is in the direct area: free trailing slots. */
		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the shared branch (already detached). */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch. */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/*
	 * Kill the remaining (whole) subtrees.  Deliberate fallthrough:
	 * truncation at a shallower level must also free every deeper tree.
	 */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT2_IND_BLOCK];
			if (nr) {
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
			}
			/* fall through */
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
			if (nr) {
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
			}
			/* fall through */
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
			if (nr) {
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
			}
			/* fall through */
		case EXT2_TIND_BLOCK:
			;
	}

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
}
1169
1170static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1171{
1172
1173
1174
1175
1176
1177
1178
1179
1180 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1181 S_ISLNK(inode->i_mode)))
1182 return;
1183 if (ext2_inode_is_fast_symlink(inode))
1184 return;
1185 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1186 return;
1187 __ext2_truncate_blocks(inode, offset);
1188}
1189
/*
 * Change the file size to @newsize: zero the tail of the last kept
 * block (via the path matching the mount mode: XIP, nobh or normal),
 * update i_size, free the blocks beyond it, and write the result out
 * synchronously when the inode requires it.  Returns 0 or -errno.
 */
static int ext2_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Wait for in-flight direct I/O before changing the size. */
	inode_dio_wait(inode);

	if (mapping_is_xip(inode->i_mapping))
		error = xip_truncate_page(inode->i_mapping, newsize);
	else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	else
		error = block_truncate_page(inode->i_mapping,
				newsize, ext2_get_block);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}

	return 0;
}
1228
/*
 * Locate the on-disk inode @ino: validate the inode number, find its
 * group descriptor, read the containing inode-table block, and return a
 * pointer into that buffer.  On success *p holds the buffer (caller
 * must brelse() it); on failure *p is NULL and an ERR_PTR is returned
 * (-EINVAL for a bad inode number, -EIO otherwise).
 */
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
{
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	*p = NULL;
	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		goto Einval;

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		goto Egdp;
	/*
	 * Figure out the offset within the block group inode table.
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))
		goto Eio;

	*p = bh;
	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);

Einval:
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);
Eio:
	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
Egdp:
	return ERR_PTR(-EIO);
}
1271
1272void ext2_set_inode_flags(struct inode *inode)
1273{
1274 unsigned int flags = EXT2_I(inode)->i_flags;
1275
1276 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
1277 if (flags & EXT2_SYNC_FL)
1278 inode->i_flags |= S_SYNC;
1279 if (flags & EXT2_APPEND_FL)
1280 inode->i_flags |= S_APPEND;
1281 if (flags & EXT2_IMMUTABLE_FL)
1282 inode->i_flags |= S_IMMUTABLE;
1283 if (flags & EXT2_NOATIME_FL)
1284 inode->i_flags |= S_NOATIME;
1285 if (flags & EXT2_DIRSYNC_FL)
1286 inode->i_flags |= S_DIRSYNC;
1287}
1288
1289
1290void ext2_get_inode_flags(struct ext2_inode_info *ei)
1291{
1292 unsigned int flags = ei->vfs_inode.i_flags;
1293
1294 ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
1295 EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
1296 if (flags & S_SYNC)
1297 ei->i_flags |= EXT2_SYNC_FL;
1298 if (flags & S_APPEND)
1299 ei->i_flags |= EXT2_APPEND_FL;
1300 if (flags & S_IMMUTABLE)
1301 ei->i_flags |= EXT2_IMMUTABLE_FL;
1302 if (flags & S_NOATIME)
1303 ei->i_flags |= EXT2_NOATIME_FL;
1304 if (flags & S_DIRSYNC)
1305 ei->i_flags |= EXT2_DIRSYNC_FL;
1306}
1307
/*
 * Read inode @ino from disk and instantiate its in-core counterpart:
 * decode the raw on-disk fields, reject stale (deleted) inodes with
 * -ESTALE, and wire up the i_op/i_fop/a_ops triples appropriate to the
 * file type and mount options (XIP, nobh).  Returns a locked new inode,
 * a cached one, or an ERR_PTR.
 */
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;
	long ret = -EIO;
	int n;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	/* Already in cache and initialized: nothing to read. */
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT2_I(inode);
	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);
 		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	/* The high 16 uid/gid bits are only valid without -o nouid32. */
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
		brelse (bh);
		ret = -ESTALE;
		goto bad_inode;
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ei->i_dir_acl = 0;
	/* For regular files, i_dir_acl doubles as the high size word. */
	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (ext2_use_xip(inode->i_sb)) {
			inode->i_mapping->a_ops = &ext2_aops_xip;
			inode->i_fop = &ext2_xip_file_operations;
		} else if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext2_fast_symlink_inode_operations;
			/* Ensure the in-inode target string is terminated. */
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		/* Old-format device numbers live in i_block[0], new in [1]. */
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (bh);
	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
1434
/*
 * Write the in-core inode back to its on-disk slot.  Encodes all fields
 * to little-endian, handles the 16/32-bit uid split, the large-file
 * size_high/ro-compat feature upgrade, and device-number encoding for
 * special inodes.  When @do_sync is set, the buffer is written out
 * synchronously and I/O errors are reported via -EIO.
 */
static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
 		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old inodes get
		 * re-used with the upper 16 bits of the uid/gid intact
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		/* Regular files reuse i_dir_acl as the high size word. */
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_write_super(sb);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Old-format dev numbers go in i_block[0], new in [1]. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
			err = -EIO;
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
	brelse (bh);
	return err;
}
1539
1540int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1541{
1542 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1543}
1544
/*
 * VFS ->setattr: validate the attribute change, transfer quota when the
 * owner changes, perform a size change via ext2_setsize(), copy the
 * remaining attributes into the inode, and re-run ACL-based mode
 * fixups on chmod.  Returns 0 or -errno.
 */
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if (is_quota_modification(inode, iattr))
		dquot_initialize(inode);
	/* Ownership change: move the inode's quota usage to the new owner. */
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);

	return error;
}
1574