1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/module.h>
26#include <linux/fs.h>
27#include <linux/time.h>
28#include <linux/ext3_jbd.h>
29#include <linux/jbd.h>
30#include <linux/highuid.h>
31#include <linux/pagemap.h>
32#include <linux/quotaops.h>
33#include <linux/string.h>
34#include <linux/buffer_head.h>
35#include <linux/writeback.h>
36#include <linux/mpage.h>
37#include <linux/uio.h>
38#include <linux/bio.h>
39#include <linux/fiemap.h>
40#include <linux/namei.h>
41#include "xattr.h"
42#include "acl.h"
43
44static int ext3_writepage_trans_blocks(struct inode *inode);
45
46
47
48
49static int ext3_inode_is_fast_symlink(struct inode *inode)
50{
51 int ea_blocks = EXT3_I(inode)->i_file_acl ?
52 (inode->i_sb->s_blocksize >> 9) : 0;
53
54 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
55}
56
57
58
59
60
61
62
63
64
65
/*
 * ext3_forget() - drop journal state for a block that is being freed.
 * @handle:      journal handle for the current transaction
 * @is_metadata: non-zero if the freed block held filesystem metadata
 * @inode:       owning inode (consulted only for its journalling mode)
 * @bh:          buffer head for the block; may be NULL on the forget path
 * @blocknr:     physical block number being freed (needed for revoke)
 *
 * Depending on the data journalling mode, either "forget" the buffer or
 * "revoke" the block so a later journal replay cannot resurrect stale
 * contents over reused blocks.  Returns 0 or a journal error code; a
 * failed revoke aborts the filesystem, since replay could corrupt it.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
			"data mode %lx\n",
			bh, is_metadata, inode->i_mode,
			test_opt(inode->i_sb, DATA_FLAGS));

	/*
	 * Plain journal_forget() is enough in data=journal mode, and for
	 * data blocks in ordered/writeback mode: no revoke record needed.
	 */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * Otherwise (metadata, or journalled data on a non-journal-data
	 * mount) the block must be revoked in the journal.
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
105
106
107
108
109
110static unsigned long blocks_for_truncate(struct inode *inode)
111{
112 unsigned long needed;
113
114 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
115
116
117
118
119
120
121
122 if (needed < 2)
123 needed = 2;
124
125
126
127 if (needed > EXT3_MAX_TRANS_DATA)
128 needed = EXT3_MAX_TRANS_DATA;
129
130 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
131}
132
133
134
135
136
137
138
139
140
141
142
143static handle_t *start_transaction(struct inode *inode)
144{
145 handle_t *result;
146
147 result = ext3_journal_start(inode, blocks_for_truncate(inode));
148 if (!IS_ERR(result))
149 return result;
150
151 ext3_std_error(inode->i_sb, PTR_ERR(result));
152 return result;
153}
154
155
156
157
158
159
160
161static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
162{
163 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
164 return 0;
165 if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
166 return 0;
167 return 1;
168}
169
170
171
172
173
174
/*
 * Restart the truncate transaction to refill its journal credits.
 *
 * truncate_mutex is dropped across the restart: journal_restart() may
 * block waiting for the running transaction to commit, and holding the
 * mutex there could deadlock with other inode operations.  The caller
 * must assume the state protected by the mutex may have changed once
 * we retake it.
 */
static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
{
	int ret;

	jbd_debug(2, "restarting handle %p\n", handle);
	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
	mutex_lock(&EXT3_I(inode)->truncate_mutex);
	return ret;
}
191
192
193
194
/*
 * Called by the VFS when an inode is evicted from memory.  If the link
 * count has reached zero (and the inode is not bad) we also delete it:
 * truncate the data, remove it from the orphan list, stamp i_dtime and
 * free the on-disk inode.  Otherwise we only tear down in-core state.
 */
void ext3_evict_inode (struct inode *inode)
{
	struct ext3_block_alloc_info *rsv;
	handle_t *handle;
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);
		want_delete = 1;
	}

	truncate_inode_pages(&inode->i_data, 0);

	/* Release the preallocation reservation window, if any. */
	ext3_discard_reservation(inode);
	rsv = EXT3_I(inode)->i_block_alloc_info;
	EXT3_I(inode)->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (!want_delete)
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * If we cannot start a transaction we still must clean up
		 * the in-core orphan list (NULL handle: in-memory only).
		 */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);

	/*
	 * Truncate done: remove the on-disk orphan record and mark the
	 * inode as deleted.
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * If the dirty fails we cannot free the inode: a reboot could see
	 * the deleted flag without i_dtime and mis-handle it.  Leaving the
	 * inode allocated merely leaks a block; freeing a live inode would
	 * corrupt the filesystem.
	 */
	if (ext3_mark_inode_dirty(handle, inode)) {
		/* If that failed, just drop quota and be done with it. */
		dquot_drop(inode);
		end_writeback(inode);
	} else {
		ext3_xattr_delete_inode(handle, inode);
		dquot_free_inode(inode);
		dquot_drop(inode);
		end_writeback(inode);
		ext3_free_inode(handle, inode);
	}
	ext3_journal_stop(handle);
	return;
no_delete:
	end_writeback(inode);
	dquot_drop(inode);
}
268
/*
 * One step of an indirect-block lookup chain: @p points at the slot
 * holding a block number (in i_data or inside an indirect block),
 * @key caches the value read from *@p, and @bh pins the buffer that
 * contains @p (NULL when the slot lives in the inode itself).
 */
typedef struct {
	__le32 *p;
	__le32 key;
	struct buffer_head *bh;
} Indirect;
274
275static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
276{
277 p->key = *(p->p = v);
278 p->bh = bh;
279}
280
281static int verify_chain(Indirect *from, Indirect *to)
282{
283 while (from <= to && from->key == *from->p)
284 from++;
285 return (from > to);
286}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
/*
 * ext3_block_to_path - parse a logical block number into its lookup path.
 * @inode:    inode in question
 * @i_block:  logical block number within the file
 * @offsets:  filled with up to four array indices along the chain
 * @boundary: if non-NULL, set to the number of blocks remaining before
 *            the end of the final pointer block (0 = last slot)
 *
 * Returns the depth of the chain (1 = direct, up to 4 = triple
 * indirect) or 0 on error, after issuing an ext3_warning().
 */
static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		/* Beyond the triple-indirect range this fs can address. */
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
/*
 * ext3_get_branch - read the chain of indirect blocks leading to data.
 * @inode:   inode in question
 * @depth:   chain depth returned by ext3_block_to_path()
 * @offsets: array indices to follow at each level
 * @chain:   filled with the Indirect triples read along the way
 * @err:     set to 0, -EIO (read failure) or -EAGAIN (chain changed
 *           under us, e.g. by a concurrent truncate)
 *
 * Returns NULL when the whole chain resolved (chain[depth-1].key holds
 * the data block), or a pointer to the last complete triple — the one
 * whose ->key is zero or whose child could not be read.
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed for the first step. */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader/writer: detect a truncate racing with us. */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
/*
 * ext3_find_near - find a place for allocation with sufficient locality.
 * @inode: owner
 * @ind:   last complete Indirect triple from the failed lookup
 *
 * Heuristics, in order of preference: the nearest preceding allocated
 * pointer in the same block of pointers; the block the pointers live in;
 * otherwise the start of the inode's block group, offset by a per-task
 * colour so parallel writers spread out.  Returns the preferred block.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
470
471
472
473
474
475
476
477
478
479
480
481static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
482 Indirect *partial)
483{
484 struct ext3_block_alloc_info *block_i;
485
486 block_i = EXT3_I(inode)->i_block_alloc_info;
487
488
489
490
491
492 if (block_i && (block == block_i->last_alloc_logical_block + 1)
493 && (block_i->last_alloc_physical_block != 0)) {
494 return block_i->last_alloc_physical_block + 1;
495 }
496
497 return ext3_find_near(inode, partial);
498}
499
500
501
502
503
504
505
506
507
508
509
510
511
/*
 * ext3_blks_to_allocate - how many direct blocks to allocate in one go.
 * @branch:              partial lookup chain
 * @k:                   number of missing indirect levels (> 0 means the
 *                       branch ends in a hole: all slots below are free)
 * @blks:                number of data blocks the caller wants mapped
 * @blocks_to_boundary:  slots left before the end of the pointer block
 *
 * Returns the count of direct blocks to request, never crossing the
 * pointer-block boundary.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
				 int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * A new indirect block is needed, so everything up to the
	 * boundary is free: just cap the request at the boundary.
	 */
	if (k > 0) {
		/* right now don't hanel cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/* Otherwise extend only across contiguous zero (unmapped) slots. */
	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
/*
 * ext3_alloc_blocks - allocate the indirect blocks plus direct blocks.
 * @handle:        journal handle
 * @inode:         owner
 * @goal:          preferred physical block for the allocation
 * @indirect_blks: number of missing indirect blocks to allocate
 * @blks:          number of data blocks wanted
 * @new_blocks:    out: one entry per indirect block, then the first
 *                 block of the (contiguous) data-block run
 * @err:           out: 0 or allocator error
 *
 * Loops on the allocator until at least the metadata blocks are
 * covered; the final extent (whatever remains of the last allocation)
 * becomes the data run.  Returns the number of data blocks obtained,
 * or 0 with *err set after freeing any partial allocation.
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.  The metadata blocks need not be
	 * contiguous with the data blocks or each other.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* blocks left over after covering the metadata: done */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i <index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
631 int indirect_blks, int *blks, ext3_fsblk_t goal,
632 int *offsets, Indirect *branch)
633{
634 int blocksize = inode->i_sb->s_blocksize;
635 int i, n = 0;
636 int err = 0;
637 struct buffer_head *bh;
638 int num;
639 ext3_fsblk_t new_blocks[4];
640 ext3_fsblk_t current_block;
641
642 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
643 *blks, new_blocks, &err);
644 if (err)
645 return err;
646
647 branch[0].key = cpu_to_le32(new_blocks[0]);
648
649
650
651 for (n = 1; n <= indirect_blks; n++) {
652
653
654
655
656
657 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
658 branch[n].bh = bh;
659 lock_buffer(bh);
660 BUFFER_TRACE(bh, "call get_create_access");
661 err = ext3_journal_get_create_access(handle, bh);
662 if (err) {
663 unlock_buffer(bh);
664 brelse(bh);
665 goto failed;
666 }
667
668 memset(bh->b_data, 0, blocksize);
669 branch[n].p = (__le32 *) bh->b_data + offsets[n];
670 branch[n].key = cpu_to_le32(new_blocks[n]);
671 *branch[n].p = branch[n].key;
672 if ( n == indirect_blks) {
673 current_block = new_blocks[n];
674
675
676
677
678
679 for (i=1; i < num; i++)
680 *(branch[n].p + i) = cpu_to_le32(++current_block);
681 }
682 BUFFER_TRACE(bh, "marking uptodate");
683 set_buffer_uptodate(bh);
684 unlock_buffer(bh);
685
686 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
687 err = ext3_journal_dirty_metadata(handle, bh);
688 if (err)
689 goto failed;
690 }
691 *blks = num;
692 return err;
693failed:
694
695 for (i = 1; i <= n ; i++) {
696 BUFFER_TRACE(branch[i].bh, "call journal_forget");
697 ext3_journal_forget(handle, branch[i].bh);
698 }
699 for (i = 0; i <indirect_blks; i++)
700 ext3_free_blocks(handle, inode, new_blocks[i], 1);
701
702 ext3_free_blocks(handle, inode, new_blocks[i], num);
703
704 return err;
705}
706
707
708
709
710
711
712
713
714
715
716
717
718
719
/*
 * ext3_splice_branch - splice the allocated branch onto the inode.
 * @handle: journal handle
 * @inode:  owner
 * @block:  logical number of the first allocated block
 * @where:  chain position to attach at (from ext3_get_branch/alloc_branch)
 * @num:    number of indirect blocks in the new branch (0 = direct)
 * @blks:   number of direct blocks allocated
 *
 * Stores the missing link (where->key into *where->p), fills in any
 * additional contiguous direct-block pointers, updates the allocation
 * hints, and journals whichever object (indirect block or inode) was
 * modified.  On error the whole branch is freed again.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;
	struct ext3_inode_info *ei = EXT3_I(inode);

	block_i = ei->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* That's it: store the missing link. */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the further
	 * just-allocated direct blocks (they are physically contiguous).
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the most recently allocated logical & physical block,
	 * to help pick the goal block for the next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);
	/* ext3_mark_inode_dirty already updated i_sync_tid */
	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  (Growing the file via an indirect
		 * block still updates i_size, but that happens later in
		 * the generic write-end -> mark_inode_dirty path.)
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* Forget the journalled indirect buffers and free all blocks. */
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
/*
 * ext3_get_blocks_handle - map logical blocks to physical, allocating
 * on demand.
 * @handle:    journal handle (may be NULL iff @create == 0)
 * @inode:     inode in question
 * @iblock:    first logical block to map
 * @maxblocks: maximum number of contiguous blocks to map
 * @bh_result: result buffer head: mapped/new/boundary bits and blocknr
 * @create:    non-zero to allocate missing blocks
 *
 * Returns the number of blocks mapped (> 0), 0 if the block is a hole
 * and @create is 0, or a negative error.  The lookup itself is
 * lockless; allocation is serialised by truncate_mutex, and -EAGAIN
 * from a racing truncate forces a re-read of the chain.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext3_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int count = 0;
	ext3_fsblk_t first_block = 0;


	J_ASSERT(handle != NULL || create == 0);
	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks, as long as they are contiguous */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext3_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * The indirect block might have been
				 * removed by a truncate while we were
				 * reading it.  Forget what we have and
				 * flag -EAGAIN so the chain is re-read.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Block out ext3_truncate while we alter the tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	/*
	 * If the allocation must be retried (-EAGAIN), or the chain may
	 * have changed while we slept on the mutex, re-walk it under the
	 * lock.  A complete chain now means somebody else allocated the
	 * block for us; otherwise we continue from the new partial point.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the
	 * per-inode reservation window for regular files.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext3_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);

	/*
	 * Build the new branch; on success splice it into the tree.
	 */
	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
975
976
977#define DIO_MAX_BLOCKS 4096
978
979
980
981
982
983
984
985#define DIO_CREDITS 25
986
/*
 * ext3_get_block - get_block_t callback for the generic I/O paths.
 *
 * Wraps ext3_get_blocks_handle().  If no transaction is running and we
 * must allocate (direct I/O write), start one sized for DIO; otherwise
 * reuse the caller's current handle (NULL for plain lookups).
 */
static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {	/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext3_journal_start(inode, DIO_CREDITS +
				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create);
	if (ret > 0) {
		/* Report how many contiguous blocks were mapped. */
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext3_journal_stop(handle);
out:
	return ret;
}
1017
1018int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1019 u64 start, u64 len)
1020{
1021 return generic_block_fiemap(inode, fieinfo, start, len,
1022 ext3_get_block);
1023}
1024
1025
1026
1027
/*
 * ext3_getblk - get (and possibly allocate) a buffer for a file block.
 * @handle: journal handle (may be NULL iff @create == 0)
 * @inode:  inode in question
 * @block:  logical block number
 * @create: allocate the block if it does not exist
 * @errp:   out: 0 or negative error
 *
 * Returns the buffer head for the block, or NULL on error/hole.  A
 * freshly allocated block is zeroed under the journal's create access
 * before being marked uptodate and journalled as metadata (callers use
 * this for directory and symlink blocks).
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create);
	/*
	 * ext3_get_blocks_handle() returns the number of blocks mapped;
	 * we asked for one, so anything > 1 indicates a bug.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext3_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
1094
1095struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1096 int block, int create, int *err)
1097{
1098 struct buffer_head * bh;
1099
1100 bh = ext3_getblk(handle, inode, block, create, err);
1101 if (!bh)
1102 return bh;
1103 if (buffer_uptodate(bh))
1104 return bh;
1105 ll_rw_block(READ_META, 1, &bh);
1106 wait_on_buffer(bh);
1107 if (buffer_uptodate(bh))
1108 return bh;
1109 put_bh(bh);
1110 *err = -EIO;
1111 return NULL;
1112}
1113
/*
 * walk_page_buffers - apply @fn to every buffer of a page in [from, to).
 * @handle:  journal handle passed through to @fn
 * @head:    first buffer of the page (circular b_this_page list)
 * @from:    first byte offset of interest within the page
 * @to:      end byte offset (exclusive)
 * @partial: if non-NULL, set to 1 when a skipped buffer is not uptodate
 *           (i.e. the page cannot be marked fully uptodate)
 * @fn:      callback applied to each buffer overlapping the range
 *
 * Stops at the first non-zero return from @fn and returns that value;
 * returns 0 if all callbacks succeeded.
 */
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	/* The buffer list is circular: stop when we come back to head. */
	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
/*
 * walk_page_buffers() callback used by the data=journal write path:
 * obtain journal write access for one page buffer.
 *
 * The buffer may already be dirty from an earlier mmap write.  We must
 * clear the dirty bit *before* calling get_write_access — a dirty
 * buffer inside a live transaction would confuse the journal — and
 * immediately re-dirty it through the journal afterwards so the data
 * is not lost.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	/* Unmapped or freed buffers carry nothing to journal. */
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;

	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext3_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext3_journal_dirty_metadata(handle, bh);
	return ret;
}
1194
1195
1196
1197
1198
/*
 * Undo block allocation after a failed/short write: first drop the
 * page-cache pages beyond i_size, then truncate away the blocks that
 * were instantiated past EOF.  The page-cache purge must come first so
 * no stale pages cover the freed blocks.
 */
static void ext3_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext3_truncate(inode);
}
1204
/*
 * ext3_write_begin - .write_begin address-space operation.
 *
 * Grabs/locks the page, starts a transaction, and prepares the buffers
 * via __block_write_begin().  In data=journal mode the affected
 * buffers additionally get journal write access.  On failure any
 * blocks instantiated beyond i_size are truncated away again (with an
 * orphan entry protecting against a crash in between), and -ENOSPC is
 * retried while the journal can still free space.
 */
static int ext3_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	/* Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason */
	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = __block_write_begin(page, pos, len, ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before the
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext3_can_truncate(inode))
			ext3_orphan_add(handle, inode);
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
		if (pos + len > inode->i_size)
			ext3_truncate_failed_write(inode);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
1269
1270
/*
 * Mark a buffer as journalled data for the current transaction
 * (ordered-mode write path).  On error the handle is aborted, taking
 * the journal (and filesystem) down with it.
 */
int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__func__, __func__,
						bh, handle, err);
	return err;
}
1279
1280
1281static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1282{
1283
1284
1285
1286
1287 if (buffer_mapped(bh) && buffer_uptodate(bh))
1288 return ext3_journal_dirty_data(handle, bh);
1289 return 0;
1290}
1291
1292
1293static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1294{
1295 if (!buffer_mapped(bh) || buffer_freed(bh))
1296 return 0;
1297 set_buffer_uptodate(bh);
1298 return ext3_journal_dirty_metadata(handle, bh);
1299}
1300
1301
1302
1303
1304
1305
1306
1307static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1308{
1309
1310 if (pos + copied > inode->i_size)
1311 i_size_write(inode, pos + copied);
1312 if (pos + copied > EXT3_I(inode)->i_disksize) {
1313 EXT3_I(inode)->i_disksize = pos + copied;
1314 mark_inode_dirty(inode);
1315 }
1316}
1317
1318
1319
1320
1321
1322
1323
1324
/*
 * .write_end for data=ordered mode.
 *
 * Commits the copied data via block_write_end(), queues the data
 * buffers on the transaction's ordered list, updates the file sizes,
 * and stops the transaction started by ext3_write_begin().  If the
 * write fell short, blocks instantiated beyond i_size are truncated
 * away (protected by an orphan entry in case of a crash).
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + copied;
	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, journal_dirty_data_fn);

	if (ret == 0)
		update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}
1360
/*
 * .write_end for data=writeback mode: like the ordered variant but no
 * data buffers are attached to the transaction — only sizes are
 * updated and the transaction is stopped.  Short writes still truncate
 * blocks instantiated beyond i_size (orphan-protected).
 */
static int ext3_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret = ext3_journal_stop(handle);
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}
1386
/*
 * .write_end for data=journal mode.
 *
 * Journals the written buffers as metadata (write_end_fn), zeroing any
 * buffers left unwritten by a short copy, updates i_size/i_disksize,
 * and flags the inode EXT3_STATE_JDATA so ext3_bmap() knows journalled
 * data may still be in flight.  Blocks instantiated beyond i_size by a
 * short write are truncated away (orphan-protected).
 */
static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		/* Short copy: zero what was never written. */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from + copied, to);
		to = from + copied;
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
/*
 * ext3_bmap - .bmap address-space operation (FIBMAP).
 *
 * If the inode has journalled data in flight (EXT3_STATE_JDATA), the
 * block numbers handed to user space would be meaningless until that
 * data is written back to its final location, so we flush the whole
 * journal first.  Clearing the state bit before the flush is safe: new
 * journalled data cannot appear while we hold i_mutex here.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 */
		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext3_get_block);
}
1491
1492static int bget_one(handle_t *handle, struct buffer_head *bh)
1493{
1494 get_bh(bh);
1495 return 0;
1496}
1497
1498static int bput_one(handle_t *handle, struct buffer_head *bh)
1499{
1500 put_bh(bh);
1501 return 0;
1502}
1503
1504static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1505{
1506 return !buffer_mapped(bh);
1507}
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
/*
 * .writepage for data=ordered mode.
 *
 * If the page's buffers are all mapped, no allocation (and hence no
 * transaction) is needed and we write it out directly.  Otherwise we
 * start a transaction, write the page (allocating via ext3_get_block),
 * and then attach the data buffers to the transaction's ordered list
 * so they are flushed before commit.  The bget/bput pair keeps the
 * buffers alive across block_write_full_page(), which may drop them.
 *
 * We cannot recurse into the journal if one is already active on this
 * task (e.g. memory reclaim during a commit): in that case the page is
 * simply redirtied for later.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
		page_bufs = page_buffers(page);
	} else {
		page_bufs = page_buffers(page);
		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
				       NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}
	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and truncate can
	 * then come in and change things.  So we fall through to the worst
	 * case of allocating blocks here anyway: the buffers are pinned by
	 * the bget above, and ordered-mode data is flushed before metadata
	 * at commit, so the write below keeps us consistent.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1635
/*
 * .writepage for data=writeback mode: like the ordered variant but the
 * data buffers are never attached to the transaction — the journal is
 * only needed if block allocation occurs.  Reentry into the journal is
 * refused (page is redirtied instead), and fully-mapped pages bypass
 * the transaction entirely.
 */
static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	if (ext3_journal_current_handle())
		goto out_fail;

	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0,
				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1677
/*
 * .writepage for data=journal mode.
 *
 * A page that was dirtied via mmap (PageChecked) or has no buffers yet
 * must be journalled here: we treat it like an in-place write-begin/
 * write-end pair over the whole page.  Pages whose data is already in
 * the journal just go through the normal block write path.  Reentry
 * into the journal is refused and the page redirtied.
 */
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	WARN_ON_ONCE(IS_RDONLY(inode));

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.
		 * There doesn't seem much point in redirtying the page
		 * here.
		 */
		ClearPageChecked(page);
		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
					  ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We
		 * don't really know unless we go poke around in the
		 * buffer_heads.  So just leave them alone.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
1739
1740static int ext3_readpage(struct file *file, struct page *page)
1741{
1742 return mpage_readpage(page, ext3_get_block);
1743}
1744
1745static int
1746ext3_readpages(struct file *file, struct address_space *mapping,
1747 struct list_head *pages, unsigned nr_pages)
1748{
1749 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1750}
1751
1752static void ext3_invalidatepage(struct page *page, unsigned long offset)
1753{
1754 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1755
1756
1757
1758
1759 if (offset == 0)
1760 ClearPageChecked(page);
1761
1762 journal_invalidatepage(journal, page, offset);
1763}
1764
1765static int ext3_releasepage(struct page *page, gfp_t wait)
1766{
1767 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1768
1769 WARN_ON(PageChecked(page));
1770 if (!page_has_buffers(page))
1771 return 0;
1772 return journal_try_to_free_buffers(journal, page, wait);
1773}
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
/*
 * Direct I/O for ext3.  For a size-extending write the inode is put on
 * the orphan list first, so that a crash in the middle leaves no blocks
 * beyond i_size after journal recovery.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for the orphan-list and inode updates. */
			handle = ext3_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext3_orphan_add(handle, inode);
			if (ret) {
				ext3_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext3_journal_stop(handle);
		}
	}

retry:
	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);
	/*
	 * In case of error extending the write may have instantiated a few
	 * blocks outside i_size.  Trim those off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext3_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/*
			 * We wrote the data but cannot extend i_size; trim
			 * the blocks again so the orphan entry stays honest.
			 */
			ext3_truncate(inode);
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * i_disksize was updated under the orphan-list
				 * protection above; record the new size in the
				 * on-disk inode within this transaction.
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
/*
 * set_page_dirty for data=journal mode: flag the page as Checked so
 * ext3_journalled_writepage() knows its buffers still need to be run
 * through the journal, then do the normal buffer-less dirtying.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	int rc;

	SetPageChecked(page);
	rc = __set_page_dirty_nobuffers(page);
	return rc;
}
1892
/* Address-space operations for data=ordered mode. */
static const struct address_space_operations ext3_ordered_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_ordered_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_ordered_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
1908
/* Address-space operations for data=writeback mode. */
static const struct address_space_operations ext3_writeback_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_writeback_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_writeback_write_end,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.direct_IO		= ext3_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
1924
/*
 * Address-space operations for data=journal mode.  Note: no .direct_IO
 * (journalled data cannot be written around the journal) and a custom
 * .set_page_dirty that flags pages needing journalled writeout.
 */
static const struct address_space_operations ext3_journalled_aops = {
	.readpage		= ext3_readpage,
	.readpages		= ext3_readpages,
	.writepage		= ext3_journalled_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext3_write_begin,
	.write_end		= ext3_journalled_write_end,
	.set_page_dirty		= ext3_journalled_set_page_dirty,
	.bmap			= ext3_bmap,
	.invalidatepage		= ext3_invalidatepage,
	.releasepage		= ext3_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
1939
1940void ext3_set_aops(struct inode *inode)
1941{
1942 if (ext3_should_order_data(inode))
1943 inode->i_mapping->a_ops = &ext3_ordered_aops;
1944 else if (ext3_should_writeback_data(inode))
1945 inode->i_mapping->a_ops = &ext3_writeback_aops;
1946 else
1947 inode->i_mapping->a_ops = &ext3_journalled_aops;
1948}
1949
1950
1951
1952
1953
1954
1955
/*
 * Zero the tail of the block containing byte offset 'from' (the new
 * EOF) on the already-locked, already-grabbed 'page'.  Consumes the
 * page reference: unlocks and releases it on all paths.
 */
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;

	blocksize = inode->i_sb->s_blocksize;
	/* Bytes to clear: from 'offset' to the end of its block. */
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset". */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		/* Block was freed by a committing transaction; nothing to do. */
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext3_get_block(inode, iblock, bh, 0);
		/* Still unmapped? It's a hole -- nothing on disk to zero. */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date before we modify it. */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	/* data=journal: the buffer must join the transaction before writing. */
	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		/* data=ordered: tie the data write to the transaction. */
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
2035
2036
2037
2038
2039
2040
2041static inline int all_zeroes(__le32 *p, __le32 *q)
2042{
2043 while (p < q)
2044 if (*p++)
2045 return 0;
2046 return 1;
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
/*
 * Find the indirect-chain position where the part to be truncated
 * splits off from the part that survives.  Returns the partial chain
 * (buffers held); *top is set to the block number to detach from its
 * parent, or 0 when nothing needs detaching.
 */
static Indirect *ext3_find_shared(struct inode *inode, int depth,
			int offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/* Walk up past indirect blocks that become entirely free. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of the branch is all ours and does not grow immediately from the
	 * inode it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact. */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Release everything below the split point. */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
2131
2132
2133
2134
2135
2136
2137
2138
2139
/*
 * Zero a contiguous run of block pointers [first, last) inside 'bh'
 * (or the inode itself when bh is NULL), forget their buffers, and
 * free the blocks.  Extends/restarts the transaction first if it is
 * running low on credits.
 */
static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		/* Flush our dirty state before the handle is restarted. */
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			if (ext3_journal_dirty_metadata(handle, bh))
				return;
		}
		ext3_mark_inode_dirty(handle, inode);
		truncate_restart_transaction(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			if (ext3_journal_get_write_access(handle, bh))
				return;
		}
	}

	/*
	 * Any buffers which are on the journal will be revoked by
	 * ext3_forget(), so the data cannot pop back up later if the
	 * blocks are reallocated before the transaction commits.
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
/*
 * Free the data blocks referenced by the pointers in [first, last) of
 * 'this_bh' (an indirect block, or NULL when the pointers live in the
 * inode itself).  Contiguous runs are coalesced so ext3_free_blocks()
 * is called once per extent instead of once per block.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext3_fsblk_t block_to_free = 0;	    /* start of current run */
	unsigned long count = 0;	    /* length of current run */
	__le32 *block_to_free_p = NULL;	    /* pointer slot of run start */

	ext3_fsblk_t nr;
	__le32 *p;

	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: flush it and start a new one. */
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final run, if any. */
	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
		/*
		 * The buffer head should have an attached journal head at this
		 * point.  However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block got cleared.  Check for this instead of OOPSing.
		 */
		if (bh2jh(this_bh))
			ext3_journal_dirty_metadata(handle, this_bh);
		else
			ext3_error(inode->i_sb, "ext3_free_data",
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)this_bh->b_blocknr);
	}
}
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
/*
 * Recursively free the (sub)branches rooted at the block pointers in
 * [first, last) of 'parent_bh'.  'depth' is how many levels of
 * indirection remain; at depth 0 the pointers reference data blocks
 * and ext3_free_data() handles them.
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext3_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure?  Report it and continue with the
			 * rest of the branches; leaking a subtree is better
			 * than aborting the whole truncate.
			 */
			if (!bh) {
				ext3_error(inode->i_sb, "ext3_free_branches",
					   "Read failure, inode=%lu, block="E3FSBLK,
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext3_free_branches(handle, inode, bh,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * Everything below this indirect block is gone; now
			 * make sure we have enough transaction credits left
			 * to revoke this block and clear its parent pointer,
			 * restarting the handle if needed.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext3_mark_inode_dirty(handle, inode);
				truncate_restart_transaction(handle, inode);
			}

			/*
			 * The forget must happen before the free: revoking
			 * the (metadata) block in the journal prevents stale
			 * copies of it from replaying over a future user of
			 * the same block after a crash.
			 */
			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

			ext3_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext3_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext3_journal_dirty_metadata");
					ext3_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext3_free_data(handle, inode, parent_bh, first, last);
	}
}
2394
2395int ext3_can_truncate(struct inode *inode)
2396{
2397 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2398 return 0;
2399 if (S_ISREG(inode->i_mode))
2400 return 1;
2401 if (S_ISDIR(inode->i_mode))
2402 return 1;
2403 if (S_ISLNK(inode->i_mode))
2404 return !ext3_inode_is_fast_symlink(inode);
2405 return 0;
2406}
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
/*
 * Truncate the inode to its current i_size: zero the partial tail
 * block, then free every block beyond the new last block, walking the
 * direct/indirect/double/triple-indirect trees bottom-up.  The inode
 * sits on the orphan list for the duration so a crash mid-truncate is
 * finished by recovery.
 */
void ext3_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	if (!ext3_can_truncate(inode))
		goto out_notrans;

	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);

	/*
	 * We have to lock the EOF page here, because lock_page() nests
	 * outside journal_start().
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			goto out_notrans;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		goto out_notrans;
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext3_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to record
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext3 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/*
	 * From here we block out all ext3_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {		/* direct blocks */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			ext3_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT3_IND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT3_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_IND_BLOCK:
		nr = i_data[EXT3_DIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT3_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_DIND_BLOCK:
		nr = i_data[EXT3_TIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT3_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT3_TIND_BLOCK:
		;
	}

	ext3_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous.
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext3_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle);
	return;
out_notrans:
	/*
	 * Delete the inode from the orphan list so that it doesn't stay
	 * there forever and trigger a warning at unmount time.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);
}
2612
2613static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2614 unsigned long ino, struct ext3_iloc *iloc)
2615{
2616 unsigned long block_group;
2617 unsigned long offset;
2618 ext3_fsblk_t block;
2619 struct ext3_group_desc *gdp;
2620
2621 if (!ext3_valid_inum(sb, ino)) {
2622
2623
2624
2625
2626
2627 return 0;
2628 }
2629
2630 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2631 gdp = ext3_get_group_desc(sb, block_group, NULL);
2632 if (!gdp)
2633 return 0;
2634
2635
2636
2637 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2638 EXT3_INODE_SIZE(sb);
2639 block = le32_to_cpu(gdp->bg_inode_table) +
2640 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2641
2642 iloc->block_group = block_group;
2643 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2644 return block;
2645}
2646
2647
2648
2649
2650
2651
2652
/*
 * Get the buffer containing this inode's on-disk data into iloc->bh.
 * When 'in_mem' is set the caller only needs the in-memory fields, so
 * if every other inode sharing the block is unused we can skip the
 * disk read entirely and hand back a zero-filled buffer.
 */
static int __ext3_get_inode_loc(struct inode *inode,
				struct ext3_iloc *iloc, int in_mem)
{
	ext3_fsblk_t block;
	struct buffer_head *bh;

	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block="E3FSBLK,
				 inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* Someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext3_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT3_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT3_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT3_INODES_PER_GROUP(inode->i_sb));
			/* First inode of the buffer holding our inode. */
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext3_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
					le32_to_cpu(desc->bg_inode_bitmap));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext3_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext3_error(inode->i_sb, "ext3_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block="E3FSBLK,
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
2769
2770int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2771{
2772
2773 return __ext3_get_inode_loc(inode, iloc,
2774 !ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2775}
2776
2777void ext3_set_inode_flags(struct inode *inode)
2778{
2779 unsigned int flags = EXT3_I(inode)->i_flags;
2780
2781 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2782 if (flags & EXT3_SYNC_FL)
2783 inode->i_flags |= S_SYNC;
2784 if (flags & EXT3_APPEND_FL)
2785 inode->i_flags |= S_APPEND;
2786 if (flags & EXT3_IMMUTABLE_FL)
2787 inode->i_flags |= S_IMMUTABLE;
2788 if (flags & EXT3_NOATIME_FL)
2789 inode->i_flags |= S_NOATIME;
2790 if (flags & EXT3_DIRSYNC_FL)
2791 inode->i_flags |= S_DIRSYNC;
2792}
2793
2794
2795void ext3_get_inode_flags(struct ext3_inode_info *ei)
2796{
2797 unsigned int flags = ei->vfs_inode.i_flags;
2798
2799 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2800 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2801 if (flags & S_SYNC)
2802 ei->i_flags |= EXT3_SYNC_FL;
2803 if (flags & S_APPEND)
2804 ei->i_flags |= EXT3_APPEND_FL;
2805 if (flags & S_IMMUTABLE)
2806 ei->i_flags |= EXT3_IMMUTABLE_FL;
2807 if (flags & S_NOATIME)
2808 ei->i_flags |= EXT3_NOATIME_FL;
2809 if (flags & S_DIRSYNC)
2810 ei->i_flags |= EXT3_DIRSYNC_FL;
2811}
2812
/*
 * Read inode 'ino' from disk and return a fully initialised in-core
 * inode.  Returns an ERR_PTR on failure; a repeated lookup returns the
 * cached inode without touching the disk.
 */
struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
{
	struct ext3_iloc iloc;
	struct ext3_inode *raw_inode;
	struct ext3_inode_info *ei;
	struct buffer_head *bh;
	struct inode *inode;
	journal_t *journal = EXT3_SB(sb)->s_journal;
	transaction_t *transaction;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already in cache and initialised */

	ei = EXT3_I(inode);
	ei->i_block_alloc_info = NULL;

	ret = __ext3_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext3_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;

	ei->i_state_flags = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			ret = -ESTALE;
			goto bad_inode;
		}
		/*
		 * The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 */
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
#endif
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		/* Regular files reuse the dir_acl field as i_size_high. */
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	/*
	 * Copy block pointers raw (still little-endian, see NOTE above).
	 */
	for (block = 0; block < EXT3_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set the transaction ids for sync/datasync: an fsync need only
	 * wait for the transaction that was current (running or
	 * committing) when the inode was read in.
	 */
	if (journal) {
		tid_t tid;

		spin_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		spin_unlock(&journal->j_state_lock);
		atomic_set(&ei->i_sync_tid, tid);
		atomic_set(&ei->i_datasync_tid, tid);
	}

	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
		/*
		 * Large inodes carry an extra area after the classic
		 * 128-byte inode; validate its recorded size and detect
		 * in-inode extended attributes by their magic number.
		 */
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT3_INODE_SIZE(inode->i_sb)) {
			brelse (bh);
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext3_inode) -
					    EXT3_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT3_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
		}
	} else
		ei->i_extra_isize = 0;

	/* Wire up the per-type operations. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		ext3_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext3_dir_inode_operations;
		inode->i_fop = &ext3_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext3_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext3_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext3_symlink_inode_operations;
			ext3_set_aops(inode);
		}
	} else {
		inode->i_op = &ext3_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (iloc.bh);
	ext3_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
2985
2986
2987
2988
2989
2990
2991
2992
/*
 * Copy the in-core inode fields into the on-disk inode buffer (already
 * journalled via iloc->bh) and mark the buffer dirty in the journal.
 * Consumes iloc->bh (brelse'd on all paths).
 */
static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

again:
	/* we can't allow multiple writes to the on-disk inode at once */
	lock_buffer(bh);

	/*
	 * For fields not not tracked in the in-memory inode, initialise
	 * them to zero for new inodes.
	 */
	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);

	ext3_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels.  Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if(!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
#ifdef EXT3_FRAGMENTS
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
#endif
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
		raw_inode->i_size_high =
			cpu_to_le32(ei->i_disksize >> 32);
		if (ei->i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT3_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
			       /*
				* If this is the first large file created on
				* the filesystem, add the LARGE_FILE feature
				* flag to the superblock, synchronously.
				*/
				unlock_buffer(bh);
				err = ext3_journal_get_write_access(handle,
						EXT3_SB(sb)->s_sbh);
				if (err)
					goto out_brelse;

				ext3_update_dynamic_rev(sb);
				EXT3_SET_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
				handle->h_sync = 1;
				err = ext3_journal_dirty_metadata(handle,
						EXT3_SB(sb)->s_sbh);
				/* get our lock and start over */
				goto again;
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers: old (16-bit) or new (32-bit) encoding. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	if (ei->i_extra_isize)
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);

	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	unlock_buffer(bh);
	rc = ext3_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	ext3_clear_inode_state(inode, EXT3_STATE_NEW);

	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
out_brelse:
	brelse (bh);
	ext3_std_error(inode->i_sb, err);
	return err;
}
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
/*
 * ->write_inode(): the dirty inode data already lives in the journal,
 * so for WB_SYNC_ALL we simply force a commit; async writeback is a
 * no-op.  Must not be called from a memory-reclaim or intra-journal
 * context, since committing could deadlock there.
 */
int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* Reclaim context must not block on a journal commit. */
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (ext3_journal_current_handle()) {
		/* Committing inside our own transaction would deadlock. */
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
		dump_stack();
		return -EIO;
	}

	if (wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	return ext3_force_commit(inode->i_sb);
}
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
/*
 * ->setattr(): apply attribute changes (owner, size, mode, times).
 * Owner changes are done under a journal handle so the quota transfer
 * and inode update commit together; a size-reducing truncate puts the
 * inode on the orphan list via ext3_orphan_add() before vmtruncate().
 */
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/*
		 * (user+group)*(old+new) structure of quota blocks
		 * (transactions), plus the inode itself.
		 */
		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext3_journal_stop(handle);
			return error;
		}
		/*
		 * Update the corresponding info in the inode so that the
		 * quota change is recorded even if the inode write fails.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext3_orphan_add(handle, inode);
		/*
		 * NOTE(review): i_disksize is set even when ext3_orphan_add
		 * failed above -- presumably harmless since vmtruncate below
		 * is skipped on error, but worth confirming.
		 */
		EXT3_I(inode)->i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle);
	}

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		rc = vmtruncate(inode, attr->ia_size);
		if (rc)
			goto err_out;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	if (ia_valid & ATTR_MODE)
		rc = ext3_acl_chmod(inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288static int ext3_writepage_trans_blocks(struct inode *inode)
3289{
3290 int bpp = ext3_journal_blocks_per_page(inode);
3291 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3292 int ret;
3293
3294 if (ext3_should_journal_data(inode))
3295 ret = 3 * (bpp + indirects) + 2;
3296 else
3297 ret = 2 * (bpp + indirects) + 2;
3298
3299#ifdef CONFIG_QUOTA
3300
3301
3302 ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3303#endif
3304
3305 return ret;
3306}
3307
3308
3309
3310
3311
/*
 * ext3_mark_iloc_dirty() -- copy the in-core inode into its on-disk
 * buffer (iloc->bh) and mark that buffer dirty in the journal.
 * The caller must already hold write access on iloc->bh (see
 * ext3_reserve_inode_write()).  Returns 0 or a negative errno.
 */
int ext3_mark_iloc_dirty(handle_t *handle,
		struct inode *inode, struct ext3_iloc *iloc)
{
	int err = 0;

	/* Pin the buffer: the update below may sleep. */
	get_bh(iloc->bh);

	/* ext3_do_update_inode() does the copy-out and journal dirtying. */
	err = ext3_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}
3325
3326
3327
3328
3329
3330
3331int
3332ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3333 struct ext3_iloc *iloc)
3334{
3335 int err = 0;
3336 if (handle) {
3337 err = ext3_get_inode_loc(inode, iloc);
3338 if (!err) {
3339 BUFFER_TRACE(iloc->bh, "get_write_access");
3340 err = ext3_journal_get_write_access(handle, iloc->bh);
3341 if (err) {
3342 brelse(iloc->bh);
3343 iloc->bh = NULL;
3344 }
3345 }
3346 }
3347 ext3_std_error(inode->i_sb, err);
3348 return err;
3349}
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3373{
3374 struct ext3_iloc iloc;
3375 int err;
3376
3377 might_sleep();
3378 err = ext3_reserve_inode_write(handle, inode, &iloc);
3379 if (!err)
3380 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3381 return err;
3382}
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398void ext3_dirty_inode(struct inode *inode)
3399{
3400 handle_t *current_handle = ext3_journal_current_handle();
3401 handle_t *handle;
3402
3403 handle = ext3_journal_start(inode, 2);
3404 if (IS_ERR(handle))
3405 goto out;
3406 if (current_handle &&
3407 current_handle->h_transaction != handle->h_transaction) {
3408
3409 printk(KERN_EMERG "%s: transactions do not match!\n",
3410 __func__);
3411 } else {
3412 jbd_debug(5, "marking dirty. outer handle=%p\n",
3413 current_handle);
3414 ext3_mark_inode_dirty(handle, inode);
3415 }
3416 ext3_journal_stop(handle);
3417out:
3418 return;
3419}
3420
3421#if 0
3422
3423
3424
3425
3426
3427
3428
/*
 * ext3_pin_inode() -- (dead code, compiled out by the surrounding #if 0)
 * Bind an inode's backing buffer to the given transaction: look up the
 * buffer, get journal write access, and immediately dirty it so the
 * transaction cannot commit without it.  Returns 0 or a negative errno.
 */
static int ext3_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext3_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext3_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
3448#endif
3449
3450int ext3_change_inode_journal_flag(struct inode *inode, int val)
3451{
3452 journal_t *journal;
3453 handle_t *handle;
3454 int err;
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466 journal = EXT3_JOURNAL(inode);
3467 if (is_journal_aborted(journal))
3468 return -EROFS;
3469
3470 journal_lock_updates(journal);
3471 journal_flush(journal);
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481 if (val)
3482 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3483 else
3484 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3485 ext3_set_aops(inode);
3486
3487 journal_unlock_updates(journal);
3488
3489
3490
3491 handle = ext3_journal_start(inode, 1);
3492 if (IS_ERR(handle))
3493 return PTR_ERR(handle);
3494
3495 err = ext3_mark_inode_dirty(handle, inode);
3496 handle->h_sync = 1;
3497 ext3_journal_stop(handle);
3498 ext3_std_error(inode->i_sb, err);
3499
3500 return err;
3501}
3502