1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/module.h>
26#include <linux/fs.h>
27#include <linux/time.h>
28#include <linux/ext3_jbd.h>
29#include <linux/jbd.h>
30#include <linux/highuid.h>
31#include <linux/pagemap.h>
32#include <linux/quotaops.h>
33#include <linux/string.h>
34#include <linux/buffer_head.h>
35#include <linux/writeback.h>
36#include <linux/mpage.h>
37#include <linux/uio.h>
38#include <linux/bio.h>
39#include "xattr.h"
40#include "acl.h"
41
42static int ext3_writepage_trans_blocks(struct inode *inode);
43
44
45
46
47static int ext3_inode_is_fast_symlink(struct inode *inode)
48{
49 int ea_blocks = EXT3_I(inode)->i_file_acl ?
50 (inode->i_sb->s_blocksize >> 9) : 0;
51
52 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
53}
54
55
56
57
58
59
60
61
62
63
/*
 * ext3_forget() - drop a buffer's journal state when its block is freed.
 * @handle:      journal handle for the current transaction
 * @is_metadata: non-zero when the buffer held filesystem metadata
 * @inode:       inode the block belonged to
 * @bh:          buffer head for the block (may be NULL)
 * @blocknr:     on-disk block number being freed
 *
 * Either forget the buffer (journal_forget) or revoke the block
 * (journal_revoke), depending on the journalling mode, so that a later
 * journal replay cannot resurrect stale contents.  Returns 0 or a
 * journal error code; a failed revoke aborts the filesystem.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/*
	 * In data=journal mode, and for ordinary data in the other modes,
	 * a revoke is never needed: just tell the journal to forget the
	 * buffer (if we have one) so a pending commit won't write it.
	 */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * Otherwise the block is (or may become) journalled metadata:
	 * revoke it so replay cannot overwrite a reused block.
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
103
104
105
106
107
108static unsigned long blocks_for_truncate(struct inode *inode)
109{
110 unsigned long needed;
111
112 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
113
114
115
116
117
118
119
120 if (needed < 2)
121 needed = 2;
122
123
124
125 if (needed > EXT3_MAX_TRANS_DATA)
126 needed = EXT3_MAX_TRANS_DATA;
127
128 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
129}
130
131
132
133
134
135
136
137
138
139
140
141static handle_t *start_transaction(struct inode *inode)
142{
143 handle_t *result;
144
145 result = ext3_journal_start(inode, blocks_for_truncate(inode));
146 if (!IS_ERR(result))
147 return result;
148
149 ext3_std_error(inode->i_sb, PTR_ERR(result));
150 return result;
151}
152
153
154
155
156
157
158
159static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
160{
161 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
162 return 0;
163 if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
164 return 0;
165 return 1;
166}
167
168
169
170
171
172
173static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
174{
175 jbd_debug(2, "restarting handle %p\n", handle);
176 return ext3_journal_restart(handle, blocks_for_truncate(inode));
177}
178
179
180
181
/*
 * Called at the last iput() when i_nlink is zero: truncate away the
 * inode's data and free the on-disk inode under one journal handle.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * Can't get a handle, so we can't journal the deletion.
		 * Remove the inode from the orphan list (NULL handle:
		 * best-effort, unjournalled) and bail; e2fsck will
		 * eventually reclaim the on-disk inode.
		 */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);

	/*
	 * The truncate completed (or there was nothing to truncate);
	 * drop the inode from the orphan list and stamp its deletion
	 * time before freeing it.
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime = get_seconds();

	/*
	 * If marking the inode dirty fails, something is badly wrong;
	 * just clear the in-core inode and let the journal stop below.
	 * Otherwise free the on-disk inode as well.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* in-core cleanup only */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);
}
235
/*
 * One step in a walk down the (triple-)indirect block chain:
 * @p:   address of the block-number slot inside the parent (either the
 *       inode's i_data array or an indirect block's data)
 * @key: snapshot of *p taken when the chain was built; compared against
 *       *p later to detect concurrent modification (see verify_chain())
 * @bh:  buffer holding the block that contains @p, or NULL when @p
 *       points into the inode itself
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
241
242static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
243{
244 p->key = *(p->p = v);
245 p->bh = bh;
246}
247
248static int verify_chain(Indirect *from, Indirect *to)
249{
250 while (from <= to && from->key == *from->p)
251 from++;
252 return (from > to);
253}
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
/*
 * ext3_block_to_path - translate a logical block number into a path of
 * array indices through the inode's block map.
 * @inode:    inode (only its superblock geometry is used)
 * @i_block:  logical block number to map
 * @offsets:  filled with up to four indices: first into i_data, then
 *            into each successive indirect block
 * @boundary: if non-NULL, set to the number of further blocks that fit
 *            before the last-level index wraps (allocation boundary)
 *
 * Returns the path depth (1 for direct blocks, up to 4 for triple
 * indirect), or 0 and a warning when @i_block is out of range.
 */
static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		/* Direct block: one index into i_data. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect. */
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect. */
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect. */
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/*
 * ext3_get_branch - read the chain of indirect blocks down to the data.
 * @inode:   inode in question
 * @depth:   path depth as returned by ext3_block_to_path()
 * @offsets: the index path produced by ext3_block_to_path()
 * @chain:   filled with one Indirect per level traversed
 * @err:     0 on success/hole, -EIO on read failure, -EAGAIN when a
 *           pointer changed under us (caller should retry)
 *
 * Returns NULL when the whole chain was followed to a mapped block;
 * otherwise a pointer to the last chain entry that could be filled
 * (the "partial" point: either a hole or the failure point).  The
 * chain is verified lock-free via the key snapshots taken by
 * add_chain(); callers re-check with verify_chain().
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away; no lock needed for the first step. */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Re-verify snapshots before trusting the new block. */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Hole: stop here with the partial chain. */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/*
 * ext3_find_near - pick an allocation goal with good locality.
 * @inode: owning inode
 * @ind:   the partial chain entry we are about to fill
 *
 * Preference order: the nearest preceding non-hole pointer in the same
 * (in)direct block; failing that, the indirect block's own location;
 * failing that, a pid-coloured offset into the inode's block group.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* Try to find a previously allocated block in the same array. */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No luck; place the block near its indirect block. */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Referred to from the inode itself: aim at the inode's block
	 * group, spread across it per-process to reduce contention.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
437
438
439
440
441
442
443
444
445
446
447
448
449
450static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
451 Indirect chain[4], Indirect *partial)
452{
453 struct ext3_block_alloc_info *block_i;
454
455 block_i = EXT3_I(inode)->i_block_alloc_info;
456
457
458
459
460
461 if (block_i && (block == block_i->last_alloc_logical_block + 1)
462 && (block_i->last_alloc_physical_block != 0)) {
463 return block_i->last_alloc_physical_block + 1;
464 }
465
466 return ext3_find_near(inode, partial);
467}
468
469
470
471
472
473
474
475
476
477
478
479
480
/*
 * ext3_blks_to_allocate - decide how many direct blocks to allocate.
 * @branch: the partial chain (branch[0] is the first missing level)
 * @k:      number of missing indirect blocks on the path
 * @blks:   number of data blocks the caller wants mapped
 * @blocks_to_boundary: slots left before the indirect-block boundary
 *
 * Returns the number of direct blocks to request, never crossing the
 * boundary of the last indirect block on the path.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * If indirect blocks are missing, everything past them is
	 * certainly unallocated; just cap the request at the boundary
	 * (we don't handle cross-boundary allocation here).
	 */
	if (k > 0) {
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	/* Otherwise count contiguous empty slots after the target one. */
	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
506
507
508
509
510
511
512
513
514
515
516
/*
 * ext3_alloc_blocks - allocate blocks for the missing indirect levels
 * plus as many contiguous data blocks as possible.
 * @goal:          preferred starting block
 * @indirect_blks: number of indirect blocks still needed
 * @blks:          desired number of data blocks
 * @new_blocks:    out: one entry per indirect block, then the first
 *                 data block of the contiguous data run
 * @err:           out: error code
 *
 * Returns the number of data blocks allocated (>= 1 on success); on
 * failure frees anything partially allocated and returns 0 with *err
 * set.
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Minimum required: every missing indirect block plus at least
	 * the first data block.  ext3_new_blocks() may return fewer than
	 * asked, so loop until the indirect blocks are covered and at
	 * least one data block remains (count > 0 after the inner loop).
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocate for indirect blocks and data blocks together */
		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* peel off blocks for the indirect levels first */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* first direct block of the contiguous data run */
	new_blocks[index] = current_block;

	/* number of data blocks actually allocated */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* undo: free whatever indirect blocks we already grabbed */
	for (i = 0; i <index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
/*
 * ext3_alloc_branch - allocate and link together a branch of the block
 * map: the missing indirect blocks plus a run of data blocks.
 * @indirect_blks: number of indirect blocks to create
 * @blks:          in: desired data blocks; out: data blocks allocated
 * @goal:          preferred starting block
 * @offsets:       index path for the missing levels
 * @branch:        chain entries to fill for the new levels
 *
 * Each new indirect block is zeroed, pointed at the next level, marked
 * uptodate and journalled.  Only branch[0].key links the new branch to
 * the existing tree, and that link is NOT written here - the caller
 * (ext3_splice_branch) performs the splice.  On failure everything
 * allocated is forgotten/freed and the error is returned.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			int indirect_blks, int *blks, ext3_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext3_fsblk_t new_blocks[4];
	ext3_fsblk_t current_block;

	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * Set up each new indirect block, deepest-first from the top.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get a buffer for the parent's new block, gain journal
		 * create access, zero it and plant the pointer to the
		 * next level.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext3_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * Bottom level: fill in the remaining (num - 1)
			 * contiguous data block numbers as well.
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/*
	 * Allocation failed partway: forget the journalled indirect
	 * buffers, then free the indirect blocks and the data run
	 * (new_blocks[i] with i == indirect_blks after the loop).
	 */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i <indirect_blks; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);

	ext3_free_blocks(handle, inode, new_blocks[i], num);

	return err;
}
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
/*
 * ext3_splice_branch - connect a freshly built branch into the block map.
 * @block: logical block number of the first new data block
 * @where: the partial chain entry that receives the new branch
 * @num:   number of new indirect blocks in the branch
 * @blks:  number of new data blocks
 *
 * Writes the single pointer (*where->p) that makes the new branch
 * visible, fills in any extra contiguous data pointers when no new
 * indirect blocks were needed, updates the allocation hints, and
 * journals whichever object held the pointer.  On error the new blocks
 * are forgotten and freed.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;

	block_i = EXT3_I(inode)->i_block_alloc_info;
	/*
	 * If the splice point lives in an indirect block (rather than
	 * the inode), get journal write access to it first.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}

	/* The actual splice: a single pointer store. */
	*where->p = where->key;

	/*
	 * No new indirect blocks but multiple data blocks: the extra
	 * contiguous data pointers go straight into the same array.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Remember the last logical/physical block allocated so the
	 * next sequential allocation can continue from here.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* Housekeeping: timestamp and dirty the inode. */
	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/* Journal whichever object we actually modified. */
	if (where->bh) {
		/*
		 * Spliced into an indirect block: that buffer is the
		 * changed metadata (the inode was dirtied above anyway).
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * Spliced directly into the inode's i_data; the
		 * mark_inode_dirty above covers it.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* Undo: forget the new indirect buffers and free all new blocks. */
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
/*
 * ext3_get_blocks_handle - map (and optionally allocate) blocks.
 * @handle:    journal handle; may be NULL only when @create == 0
 * @iblock:    first logical block to map
 * @maxblocks: map at most this many contiguous blocks
 * @bh_result: result buffer head; mapped/new/boundary bits are set here
 * @create:    allocate missing blocks when non-zero
 * @extend_disksize: push i_disksize up to i_size after allocation
 *
 * Returns the number of blocks mapped (> 0), 0 for an unmapped hole
 * with !create, or a negative error.  Lookups are done lock-free with
 * verify_chain(); allocation re-validates under ei->truncate_mutex.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext3_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int count = 0;
	ext3_fsblk_t first_block = 0;

	J_ASSERT(handle != NULL || create == 0);
	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case: block already mapped, no allocation needed. */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* Extend the mapping over following contiguous blocks. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext3_fsblk_t blk;

			if (!verify_chain(chain, partial)) {
				/*
				 * The chain changed under us (truncate
				 * or concurrent allocation); throw the
				 * counted blocks away and retry below.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Plain lookup of a hole, or an I/O error: nothing more to do. */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);

	/*
	 * Re-validate the chain under truncate_mutex.  If it changed
	 * (or the earlier walk hit -EAGAIN), drop the stale buffers and
	 * walk again; the block may even have been allocated meanwhile,
	 * in which case we are done without allocating.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Set up the per-inode allocation info (reservation window) on
	 * first allocation for a regular file.
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, chain, partial);

	/* Number of indirect blocks missing on the path. */
	indirect_blks = (chain + depth) - partial - 1;

	/* How many data blocks to allocate in one go. */
	count = ext3_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);

	/* Build the new branch... */
	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/* ...and splice it into the tree. */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize is updated under truncate_mutex so truncate and
	 * allocation agree on the on-disk size.
	 */
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Release the whole chain on the way out. */
	partial = chain + depth - 1;
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
943
/* Journal credits held across a direct-IO get_block (reserve + slack). */
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
945
/*
 * ext3_get_block - get_block_t callback used by the generic page and
 * direct-IO paths.  For multi-block writes it manages the journal
 * handle's credits: yielding to a locked (committing) transaction and
 * extending or restarting the handle when credits run low.
 */
static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (!create)
		goto get_block;		/* read: no journal work needed */

	if (max_blocks == 1)
		goto get_block;		/* single-block write */

	if (handle->h_transaction->t_state == T_LOCKED) {
		/*
		 * The transaction is committing; don't hold it up.
		 * Drop our handle and start a fresh one.
		 */
		ext3_journal_stop(handle);
		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		goto get_block;
	}

	if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
		/*
		 * Credits are running low; try to extend the handle.
		 */
		ret = ext3_journal_extend(handle, DIO_CREDITS);
		if (ret > 0) {
			/*
			 * Couldn't extend: restart the handle with a
			 * fresh allotment instead.
			 */
			ret = ext3_journal_restart(handle, DIO_CREDITS);
		}
	}

get_block:
	if (ret == 0) {
		ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create, 0);
		if (ret > 0) {
			/* report the mapped extent size via b_size */
			bh_result->b_size = (ret << inode->i_blkbits);
			ret = 0;
		}
	}
	return ret;
}
995
996
997
998
/*
 * ext3_getblk - map one block of @inode and return its buffer_head,
 * optionally allocating it.
 * @errp: out: 0 on success, negative error otherwise
 *
 * Returns the buffer (caller must brelse it), or NULL for a hole or on
 * error.  A freshly allocated block is zeroed under journal create
 * access before being returned.
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create, 1);
	/*
	 * We asked for exactly one block; a positive return is the
	 * number mapped, which must be 1.  Normalise it to 0.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Brand-new block: zero it under journal create
			 * access so stale disk contents never become
			 * visible, then journal it as metadata.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
1065
1066struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1067 int block, int create, int *err)
1068{
1069 struct buffer_head * bh;
1070
1071 bh = ext3_getblk(handle, inode, block, create, err);
1072 if (!bh)
1073 return bh;
1074 if (buffer_uptodate(bh))
1075 return bh;
1076 ll_rw_block(READ_META, 1, &bh);
1077 wait_on_buffer(bh);
1078 if (buffer_uptodate(bh))
1079 return bh;
1080 put_bh(bh);
1081 *err = -EIO;
1082 return NULL;
1083}
1084
/*
 * walk_page_buffers - apply @fn to each buffer of @head's page that
 * overlaps the byte range [from, to).
 * @partial: if non-NULL, set to 1 when any buffer OUTSIDE the range is
 *           not uptodate (the page cannot be marked uptodate)
 *
 * Returns the first error from @fn, but keeps walking so every buffer
 * outside the range is still examined for @partial.
 */
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer outside the range: only affects @partial */
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142static int do_journal_get_write_access(handle_t *handle,
1143 struct buffer_head *bh)
1144{
1145 if (!buffer_mapped(bh) || buffer_freed(bh))
1146 return 0;
1147 return ext3_journal_get_write_access(handle, bh);
1148}
1149
/*
 * ext3_write_begin - .write_begin for all three journalling modes.
 *
 * The page is locked BEFORE the journal handle is started: taking them
 * in the opposite order risks deadlock against writeback, which holds
 * pages locked while inside a transaction.  In data=journal mode every
 * touched buffer also gets journal write access up front.  Retries on
 * ENOSPC in case a pending commit releases space.
 */
static int ext3_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
1199
1200
1201int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1202{
1203 int err = journal_dirty_data(handle, bh);
1204 if (err)
1205 ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1206 bh, handle, err);
1207 return err;
1208}
1209
1210
1211static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1212{
1213 if (!buffer_mapped(bh) || buffer_freed(bh))
1214 return 0;
1215 set_buffer_uptodate(bh);
1216 return ext3_journal_dirty_metadata(handle, bh);
1217}
1218
1219
1220
1221
1222
1223
1224
1225static int ext3_generic_write_end(struct file *file,
1226 struct address_space *mapping,
1227 loff_t pos, unsigned len, unsigned copied,
1228 struct page *page, void *fsdata)
1229{
1230 struct inode *inode = file->f_mapping->host;
1231
1232 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1233
1234 if (pos+copied > inode->i_size) {
1235 i_size_write(inode, pos+copied);
1236 mark_inode_dirty(inode);
1237 }
1238
1239 return copied;
1240}
1241
1242
1243
1244
1245
1246
1247
1248
/*
 * .write_end for data=ordered mode: file the data buffers with the
 * journal (so they are flushed before the commit), update i_disksize
 * and i_size, and stop the handle started by write_begin.
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext3_journal_dirty_data);

	if (ret == 0) {
		/*
		 * Push i_disksize forward before updating i_size so the
		 * on-disk size never exceeds what the journal covers.
		 */
		loff_t new_i_size;

		new_i_size = pos + copied;
		if (new_i_size > EXT3_I(inode)->i_disksize)
			EXT3_I(inode)->i_disksize = new_i_size;
		copied = ext3_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		if (copied < 0)
			ret = copied;
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1289
/*
 * .write_end for data=writeback mode: no data journalling; just extend
 * i_disksize/i_size and stop the handle from write_begin.
 */
static int ext3_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = pos + copied;
	if (new_i_size > EXT3_I(inode)->i_disksize)
		EXT3_I(inode)->i_disksize = new_i_size;

	copied = ext3_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	if (copied < 0)
		ret = copied;

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1317
/*
 * .write_end for data=journal mode: journal every written buffer as
 * metadata, zero any short-copy tail, and keep i_size/i_disksize in
 * step.  EXT3_STATE_JDATA marks the inode as having journalled data
 * (ext3_bmap() needs to flush before trusting block numbers).
 */
static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		/* short copy: zero the part that was never written */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
/*
 * ext3_bmap - .bmap implementation.
 *
 * With journalled data pending (EXT3_STATE_JDATA) the block numbers we
 * would report may still change while the data sits only in the
 * journal, so flush the whole journal first and clear the flag.
 * Returns 0 on flush failure or for an unmapped block.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * Clear the flag first: any data journalled after this
		 * point sets it again and forces another flush.
		 */
		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext3_get_block);
}
1412
/* walk_page_buffers() callback: take an extra reference on @bh. */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}
1418
/* walk_page_buffers() callback: drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
1424
1425static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1426{
1427 if (buffer_mapped(bh))
1428 return ext3_journal_dirty_data(handle, bh);
1429 return 0;
1430}
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
/*
 * .writepage for data=ordered mode.
 *
 * Pin the page's buffers, let block_write_full_page() start the I/O,
 * then file the buffers with the journal so the commit waits for them.
 * If a journal handle is already active on this task we must not start
 * a nested one: redirty the page and let writeback retry later.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * We can't write the page under an existing transaction;
	 * starting another handle here could deadlock the journal.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	/* hold the buffers across block_write_full_page() */
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page was written (or at least submitted); now attach the
	 * data buffers to the running transaction so the commit orders
	 * them ahead of the metadata.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1549
/*
 * .writepage for data=writeback mode: just write the page inside a
 * handle (allocation may be needed); no data ordering.  Refuses to
 * nest inside an existing handle, like the ordered variant.
 */
static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	/* nobh mode avoids attaching buffer_heads when possible */
	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
		ret = nobh_writepage(page, ext3_get_block, wbc);
	else
		ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1582
/*
 * .writepage for data=journal mode.
 *
 * A page dirtied through the normal write path was already journalled
 * at write_end time and can go out via block_write_full_page().  A
 * PageChecked page (dirtied via set_page_dirty, e.g. mmap) has not
 * been journalled yet, so run it through a prepare/commit-style cycle
 * that journals every buffer as metadata.
 */
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * Not yet journalled: map the buffers, get write
		 * access, then journal them all as metadata.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * Already journalled at write time; a plain block
		 * write is sufficient now.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
1641
/* .readpage: plain mpage read using ext3's block mapper. */
static int ext3_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext3_get_block);
}
1646
/* .readpages: batched readahead via mpage using ext3's block mapper. */
static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
1653
1654static void ext3_invalidatepage(struct page *page, unsigned long offset)
1655{
1656 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1657
1658
1659
1660
1661 if (offset == 0)
1662 ClearPageChecked(page);
1663
1664 journal_invalidatepage(journal, page, offset);
1665}
1666
1667static int ext3_releasepage(struct page *page, gfp_t wait)
1668{
1669 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1670
1671 WARN_ON(PageChecked(page));
1672 if (!page_has_buffers(page))
1673 return 0;
1674 return journal_try_to_free_buffers(journal, page, wait);
1675}
1676
1677
1678
1679
1680
1681
1682
1683
1684
/*
 * ext3_direct_IO - .direct_IO implementation.
 *
 * For writes that extend the file the inode is put on the orphan list
 * first, so a crash mid-write leaves a truncatable orphan rather than
 * exposing stale blocks.  ext3_get_block() may stop/restart the handle
 * during the transfer, so the current handle is re-fetched before the
 * final cleanup.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle = NULL;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		if (final_size > inode->i_size) {
			/* protect against crash mid-extension */
			ret = ext3_orphan_add(handle, inode);
			if (ret)
				goto out_stop;
			orphan = 1;
			ei->i_disksize = inode->i_size;
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);

	/*
	 * Re-fetch: ext3_get_block() may have stopped and restarted
	 * the handle while the transfer was in flight.
	 */
	handle = ext3_journal_current_handle();

out_stop:
	if (handle) {
		int err;

		if (orphan && inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (orphan && ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * Record the new size in the journalled
				 * inode now; if this fails the error is
				 * intentionally not propagated (the data
				 * itself was written successfully).
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
/*
 * data=journal mode ->set_page_dirty: flag the page as needing journal
 * handling ("checked") before dirtying it without touching its buffers.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	int rc;

	SetPageChecked(page);
	rc = __set_page_dirty_nobuffers(page);
	return rc;
}
1770
/* Address-space operations for data=ordered mode. */
static const struct address_space_operations ext3_ordered_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_ordered_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_ordered_write_end,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
	.migratepage	= buffer_migrate_page,
};
1784
/* Address-space operations for data=writeback mode. */
static const struct address_space_operations ext3_writeback_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_writeback_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_writeback_write_end,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
	.migratepage	= buffer_migrate_page,
};
1798
/*
 * Address-space operations for data=journal mode.  Unlike the other
 * two tables, this one installs a set_page_dirty hook and provides
 * neither .direct_IO nor .migratepage.
 */
static const struct address_space_operations ext3_journalled_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_journalled_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_journalled_write_end,
	.set_page_dirty	= ext3_journalled_set_page_dirty,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
};
1811
1812void ext3_set_aops(struct inode *inode)
1813{
1814 if (ext3_should_order_data(inode))
1815 inode->i_mapping->a_ops = &ext3_ordered_aops;
1816 else if (ext3_should_writeback_data(inode))
1817 inode->i_mapping->a_ops = &ext3_writeback_aops;
1818 else
1819 inode->i_mapping->a_ops = &ext3_journalled_aops;
1820}
1821
1822
1823
1824
1825
1826
1827
/*
 * Zero the tail of the block that contains @from (the new EOF), so no
 * stale data past i_size remains visible in the final partial block.
 * @page is expected locked on entry; it is unlocked and released on
 * every exit path.
 */
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;

	blocksize = inode->i_sb->s_blocksize;
	/* Bytes to zero: from @offset to the end of its block. */
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * Fast path for "nobh" writeback mode on an up-to-date page:
	 * zero the range directly and dirty the page without ever
	 * attaching buffers.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user_page(page, offset, length, KM_USER0);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Walk the page's buffer ring to the buffer covering @offset. */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		/* Block already freed by a committing transaction. */
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		/* Lookup only (create == 0): do not allocate. */
		ext3_get_block(inode, iblock, bh, 0);
		/* Still unmapped?  It's a hole — reads back as zero anyway. */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* If the whole page is up to date, so is this buffer. */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* -EIO sticks only if the read really failed. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	/* data=journal: the zeroed block goes through the journal. */
	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user_page(page, offset, length, KM_USER0);
	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		/* data=ordered: tie the data write to this transaction. */
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
1918
1919
1920
1921
1922
1923
1924static inline int all_zeroes(__le32 *p, __le32 *q)
1925{
1926 while (p < q)
1927 if (*p++)
1928 return 0;
1929 return 1;
1930}
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
/*
 * ext3_find_shared - find the indirect blocks for partial truncation.
 * @inode:   inode in question
 * @depth:   depth of the affected branch
 * @offsets: offsets of pointers in that branch (from ext3_block_to_path)
 * @chain:   place to store the pointers to partial indirect blocks
 * @top:     place for the (detachable) top of the doomed branch
 *
 * Works out which part of the branch can be removed wholesale and
 * which indirect blocks are shared with data that survives the
 * truncate.  Returns a pointer into @chain marking the boundary;
 * *@top gets the pointer value of the detachable branch top, or 0 if
 * no whole branch can be detached.
 */
static Indirect *ext3_find_shared(struct inode *inode, int depth,
			int offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Trailing zero offsets mean whole subtrees are doomed: skip them. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* A full chain means the boundary sits at the deepest level read. */
	if (!partial)
		partial = chain + k-1;
	/*
	 * Boundary pointer has no cached key but is non-zero on disk:
	 * the branch grew under us, so nothing can be detached.
	 */
	if (!partial->key && *partial->p)
		/* Writer: pointer is live, branch acquired continuation */
		goto no_top;
	/* Climb while everything before the boundary pointer is zero. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * If the deepest surviving indirect block still holds live
	 * pointers before the boundary, the branch top cannot be
	 * detached there — back the pointer up by one instead so the
	 * caller frees from one slot earlier.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* The detach itself (zeroing *p->p) is left to the caller. */
#if 0
		*p->p = 0;
#endif
	}
	/* Release references to the indirect blocks above the shared one. */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
2014
2015
2016
2017
2018
2019
2020
2021
2022
/*
 * ext3_clear_blocks - zero a run of block pointers in an inode or an
 * indirect block (@bh, may be NULL for the inode case) and return the
 * @count blocks starting at @block_to_free to the allocator.  If the
 * current transaction is nearly full it is restarted first, re-taking
 * write access to @bh afterwards.
 */
static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext3_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		/* Flush pending state before restarting the transaction. */
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Zero each pointer and "forget" any cached buffer for the
	 * block, so that stale journalled data cannot clobber a later
	 * reallocation of the same block.
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			/* is_metadata == 0: these are data blocks. */
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
/*
 * ext3_free_data - free the data blocks pointed to by [@first, @last).
 * The pointers live in @this_bh (an indirect block), or in the inode
 * itself when @this_bh is NULL.  Runs of physically contiguous blocks
 * are coalesced into single ext3_clear_blocks() calls.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext3_fsblk_t block_to_free = 0;		/* start of current run */
	unsigned long count = 0;		/* blocks in current run */
	__le32 *block_to_free_p = NULL;		/* pointer slot of run start */

	ext3_fsblk_t nr;			/* current block number */
	__le32 *p;				/* current pointer slot */

	int err;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* If the pointers cannot be updated, the blocks must not
		 * be freed — bail out and leak rather than corrupt. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* Accumulate contiguous blocks into one run. */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: free it, start a new one. */
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final run, if any. */
	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
		ext3_journal_dirty_metadata(handle, this_bh);
	}
}
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
/*
 * ext3_free_branches - recursively free complete (sub)branches.
 * @handle:    JBD handle
 * @inode:     inode being truncated
 * @parent_bh: buffer holding the pointer array [@first, @last)
 *             (NULL when the pointers live in the inode itself)
 * @first:     first pointer to free
 * @last:      one past the last pointer to free
 * @depth:     depth of the branches below each pointer (0 == data)
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext3_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;	/* A hole */

			/* Read the indirect block one level down. */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * Read failure: report it but do not free this
			 * branch — its children are unknown, so leaking
			 * them is the safe choice.
			 */
			if (!bh) {
				ext3_error(inode->i_sb, "ext3_free_branches",
					"Read failure, inode=%lu, block="E3FSBLK,
					inode->i_ino, nr);
				continue;
			}

			/* Zap the whole subtree first: bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext3_free_branches(handle, inode, bh,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * Revoke this indirect (metadata) block so stale
			 * journalled copies cannot overwrite a future
			 * reallocation of @nr.
			 *
			 * NOTE(review): bh is not brelse()d here —
			 * presumably the forget/revoke path consumes the
			 * reference; confirm against ext3_forget().
			 */
			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Freeing the children may have consumed most of
			 * the transaction's credits: check for abort and
			 * extend/restart before freeing this block and
			 * updating the parent pointer.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext3_mark_inode_dirty(handle, inode);
				ext3_journal_test_restart(handle, inode);
			}

			ext3_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block just freed is referenced from
				 * an indirect block: clear the pointer and
				 * journal the parent.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext3_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext3_journal_dirty_metadata");
					ext3_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* Bottom of the tree: these pointers are data blocks. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext3_free_data(handle, inode, parent_bh, first, last);
	}
}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
/*
 * ext3_truncate - truncate the inode's data blocks down to i_size.
 *
 * Zeroes the tail of the last partial block, then walks the direct
 * and indirect block trees freeing everything past the new EOF.  The
 * inode sits on the orphan list for the duration so that a crash
 * mid-truncate is finished during journal recovery.
 */
void ext3_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	/* Only regular files, directories and (slow) symlinks have blocks. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext3_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	/*
	 * A block-aligned size needs no partial-block zeroing; otherwise
	 * grab the page containing the new EOF up front, before any
	 * transaction is started.
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			/* Zero the page content before dropping it. */
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext3_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * From here on the inode must be on the orphan list so journal
	 * recovery can complete an interrupted truncate.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * With the orphan entry in place it is safe to publish the new
	 * size to the on-disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/* Serialise against block allocation (ext3_get_block paths). */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {		/* direct blocks only */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of the detachable branch (if any). */
	if (nr) {
		if (partial == chain) {
			/* The branch top lives in the inode itself. */
			ext3_free_branches(handle, inode, NULL,
				   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * The detach is not journalled here; recovery
			 * re-runs the truncate if we crash first.
			 */
		} else {
			/* Branch top lives in a shared indirect block. */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext3_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the tails of the shared indirect blocks, deepest first. */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Free the whole indirect trees past the truncate point. */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT3_IND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT3_IND_BLOCK] = 0;
		}
		/* fallthrough */
	case EXT3_IND_BLOCK:
		nr = i_data[EXT3_DIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT3_DIND_BLOCK] = 0;
		}
		/* fallthrough */
	case EXT3_DIND_BLOCK:
		nr = i_data[EXT3_TIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT3_TIND_BLOCK] = 0;
		}
		/* fallthrough */
	case EXT3_TIND_BLOCK:
		;
	}

	ext3_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/*
	 * Synchronous inodes want the truncate committed before we
	 * return.
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * The truncate is complete (or was abandoned before any blocks
	 * were freed): drop the orphan entry.  The i_nlink == 0 case is
	 * left on the list for the delete path to finish.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle);
}
2457
/*
 * ext3_get_inode_block - locate the filesystem block holding inode @ino.
 * Fills in iloc->block_group and iloc->offset (byte offset within the
 * block) and returns the block number, or 0 on any validation failure.
 */
static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
		unsigned long ino, struct ext3_iloc *iloc)
{
	unsigned long desc, group_desc, block_group;
	unsigned long offset;
	ext3_fsblk_t block;
	struct buffer_head *bh;
	struct ext3_group_desc * gdp;

	if (!ext3_valid_inum(sb, ino)) {
		/*
		 * Invalid inode number — refuse quietly (no ext3_error):
		 * presumably stale NFS file handles can legitimately
		 * reach here.
		 */
		return 0;
	}

	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	if (block_group >= EXT3_SB(sb)->s_groups_count) {
		ext3_error(sb,"ext3_get_inode_block","group >= groups count");
		return 0;
	}
	/*
	 * NOTE(review): barrier presumably pairs with online-resize
	 * updates to s_groups_count/s_group_desc — confirm.
	 */
	smp_rmb();
	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
	desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
	bh = EXT3_SB(sb)->s_group_desc[group_desc];
	if (!bh) {
		ext3_error (sb, "ext3_get_inode_block",
			    "Descriptor not loaded");
		return 0;
	}

	gdp = (struct ext3_group_desc *)bh->b_data;
	/*
	 * Figure out the offset within the block group inode table.
	 */
	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
		EXT3_INODE_SIZE(sb);
	block = le32_to_cpu(gdp[desc].bg_inode_table) +
		(offset >> EXT3_BLOCK_SIZE_BITS(sb));

	iloc->block_group = block_group;
	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
	return block;
}
2504
2505
2506
2507
2508
2509
2510
/*
 * __ext3_get_inode_loc - return an up-to-date buffer for the block
 * containing the inode, in iloc->bh.
 *
 * @in_mem: the caller already has all inode data in memory, so the
 * disk read may be skipped when the inode bitmap shows every other
 * inode in the same block is unused (the block is then zero-filled).
 */
static int __ext3_get_inode_loc(struct inode *inode,
				struct ext3_iloc *iloc, int in_mem)
{
	ext3_fsblk_t block;
	struct buffer_head *bh;

	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block="E3FSBLK,
				 inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			/* Someone brought it uptodate while we waited. */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * Optimisation: if this is the only in-use inode in the
		 * block (per the cached inode bitmap), its on-disk
		 * content is irrelevant and the read can be skipped.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext3_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT3_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT3_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT3_INODES_PER_GROUP(inode->i_sb));
			/* First inode in this buffer's worth of inodes. */
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext3_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
					le32_to_cpu(desc->bg_inode_bitmap));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * Reading the bitmap from disk just to avoid the
			 * inode read would gain nothing — bail to I/O.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			/* Any other in-use inode sharing this block? */
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext3_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* All other inodes free: skip the read. */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * Slow path: read the inode block synchronously from
		 * disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext3_error(inode->i_sb, "ext3_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block="E3FSBLK,
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
2617
2618int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2619{
2620
2621 return __ext3_get_inode_loc(inode, iloc,
2622 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2623}
2624
2625void ext3_set_inode_flags(struct inode *inode)
2626{
2627 unsigned int flags = EXT3_I(inode)->i_flags;
2628
2629 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2630 if (flags & EXT3_SYNC_FL)
2631 inode->i_flags |= S_SYNC;
2632 if (flags & EXT3_APPEND_FL)
2633 inode->i_flags |= S_APPEND;
2634 if (flags & EXT3_IMMUTABLE_FL)
2635 inode->i_flags |= S_IMMUTABLE;
2636 if (flags & EXT3_NOATIME_FL)
2637 inode->i_flags |= S_NOATIME;
2638 if (flags & EXT3_DIRSYNC_FL)
2639 inode->i_flags |= S_DIRSYNC;
2640}
2641
2642
2643void ext3_get_inode_flags(struct ext3_inode_info *ei)
2644{
2645 unsigned int flags = ei->vfs_inode.i_flags;
2646
2647 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2648 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2649 if (flags & S_SYNC)
2650 ei->i_flags |= EXT3_SYNC_FL;
2651 if (flags & S_APPEND)
2652 ei->i_flags |= EXT3_APPEND_FL;
2653 if (flags & S_IMMUTABLE)
2654 ei->i_flags |= EXT3_IMMUTABLE_FL;
2655 if (flags & S_NOATIME)
2656 ei->i_flags |= EXT3_NOATIME_FL;
2657 if (flags & S_DIRSYNC)
2658 ei->i_flags |= EXT3_DIRSYNC_FL;
2659}
2660
/*
 * ext3_read_inode - read an inode from disk and initialise the
 * in-core inode.  On any failure the inode is marked bad via
 * make_bad_inode().
 */
void ext3_read_inode(struct inode * inode)
{
	struct ext3_iloc iloc;
	struct ext3_inode *raw_inode;
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh;
	int block;

#ifdef CONFIG_EXT3_FS_POSIX_ACL
	/* ACLs are loaded lazily: mark both caches as not yet read. */
	ei->i_acl = EXT3_ACL_NOT_CACHED;
	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
#endif
	ei->i_block_alloc_info = NULL;

	/* in_mem == 0: the on-disk content must really be read. */
	if (__ext3_get_inode_loc(inode, &iloc, 0))
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext3_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit uids: fold in the high 16 bits. */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * A zero link count is only legitimate while orphan recovery is
	 * in progress (EXT3_ORPHAN_FS); otherwise this slot holds a
	 * deleted/unused inode and must not be instantiated.
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			goto bad_inode;
		}
		/* Orphan recovery: accept the inode as-is. */
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
#endif
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		/* Regular files reuse the dir_acl slot as size bits 32+. */
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	/*
	 * NOTE: block pointers are copied byte-for-byte; the in-core
	 * i_data stays little-endian (no le32_to_cpu here).
	 */
	for (block = 0; block < EXT3_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
		/*
		 * Large on-disk inodes carry i_extra_isize giving the
		 * size of the extra fields; validate that it fits in
		 * the on-disk record.
		 */
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT3_INODE_SIZE(inode->i_sb)) {
			brelse (bh);
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused: claim it. */
			ei->i_extra_isize = sizeof(struct ext3_inode) -
					    EXT3_GOOD_OLD_INODE_SIZE;
		} else {
			/* In-inode xattrs are flagged by a magic number. */
			__le32 *magic = (void *)raw_inode +
					EXT3_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
				ei->i_state |= EXT3_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	/* Wire up the per-file-type operation tables. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		ext3_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext3_dir_inode_operations;
		inode->i_fop = &ext3_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext3_inode_is_fast_symlink(inode))
			inode->i_op = &ext3_fast_symlink_inode_operations;
		else {
			inode->i_op = &ext3_symlink_inode_operations;
			ext3_set_aops(inode);
		}
	} else {
		inode->i_op = &ext3_special_inode_operations;
		/* Device numbers: old 16-bit encoding in block[0],
		 * new encoding in block[1]. */
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (iloc.bh);
	ext3_set_inode_flags(inode);
	return;

bad_inode:
	make_bad_inode(inode);
	return;
}
2796
2797
2798
2799
2800
2801
2802
2803
/*
 * ext3_do_update_inode - copy the in-core inode into the on-disk
 * image at @iloc and journal the buffer.  The caller must already
 * hold journal write access to iloc->bh; the buffer reference is
 * dropped before returning.
 */
static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* A freshly created inode may find garbage from a previous user
	 * of the slot: clear the whole on-disk record first. */
	if (ei->i_state & EXT3_STATE_NEW)
		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);

	/* Sync the EXT3_*_FL flags from the VFS flags before writing. */
	ext3_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Deleted inodes keep zero high uid/gid words — NOTE(review):
		 * presumably for interoperability with old kernels that
		 * reuse those fields; confirm.
		 */
		if(!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
#ifdef EXT3_FRAGMENTS
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
#endif
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
		/* Regular files store size bits 32+ in i_size_high. */
		raw_inode->i_size_high =
			cpu_to_le32(ei->i_disksize >> 32);
		if (ei->i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT3_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
				/*
				 * First large file on the filesystem:
				 * flip the LARGE_FILE ro-compat feature
				 * in the superblock, and commit it
				 * synchronously (h_sync).
				 */
				err = ext3_journal_get_write_access(handle,
						EXT3_SB(sb)->s_sbh);
				if (err)
					goto out_brelse;
				ext3_update_dynamic_rev(sb);
				EXT3_SET_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
				sb->s_dirt = 1;
				handle->h_sync = 1;
				err = ext3_journal_dirty_metadata(handle,
						EXT3_SB(sb)->s_sbh);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers: old encoding in block[0], new in block[1]. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	if (ei->i_extra_isize)
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);

	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	rc = ext3_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	/* The on-disk record is now fully initialised. */
	ei->i_state &= ~EXT3_STATE_NEW;

out_brelse:
	brelse (bh);
	ext3_std_error(inode->i_sb, err);
	return err;
}
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951int ext3_write_inode(struct inode *inode, int wait)
2952{
2953 if (current->flags & PF_MEMALLOC)
2954 return 0;
2955
2956 if (ext3_journal_current_handle()) {
2957 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
2958 dump_stack();
2959 return -EIO;
2960 }
2961
2962 if (!wait)
2963 return 0;
2964
2965 return ext3_force_commit(inode->i_sb);
2966}
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
/*
 * ext3_setattr - change inode attributes.
 *
 * Ownership changes run under a transaction because the quota
 * transfer touches quota files; size reductions are orphan-protected
 * so an interrupted truncate is completed at recovery.  The truncate
 * itself is driven by inode_setattr().
 */
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* Credits cover quota init + delete for both ids, plus
		 * the inode update itself. */
		handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext3_journal_stop(handle);
			return error;
		}
		/* Update the in-core ids only after the quota transfer
		 * succeeded. */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* Orphan-protect the shrink before publishing the size. */
		error = ext3_orphan_add(handle, inode);
		EXT3_I(inode)->i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle);
	}

	rc = inode_setattr(inode, attr);

	/* The truncate (if any) is done: the inode no longer needs
	 * orphan protection.  i_nlink == 0 is left for delete_inode. */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext3_acl_chmod(inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086static int ext3_writepage_trans_blocks(struct inode *inode)
3087{
3088 int bpp = ext3_journal_blocks_per_page(inode);
3089 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3090 int ret;
3091
3092 if (ext3_should_journal_data(inode))
3093 ret = 3 * (bpp + indirects) + 2;
3094 else
3095 ret = 2 * (bpp + indirects) + 2;
3096
3097#ifdef CONFIG_QUOTA
3098
3099
3100 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3101#endif
3102
3103 return ret;
3104}
3105
3106
3107
3108
3109
3110int ext3_mark_iloc_dirty(handle_t *handle,
3111 struct inode *inode, struct ext3_iloc *iloc)
3112{
3113 int err = 0;
3114
3115
3116 get_bh(iloc->bh);
3117
3118
3119 err = ext3_do_update_inode(handle, inode, iloc);
3120 put_bh(iloc->bh);
3121 return err;
3122}
3123
3124
3125
3126
3127
3128
3129int
3130ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3131 struct ext3_iloc *iloc)
3132{
3133 int err = 0;
3134 if (handle) {
3135 err = ext3_get_inode_loc(inode, iloc);
3136 if (!err) {
3137 BUFFER_TRACE(iloc->bh, "get_write_access");
3138 err = ext3_journal_get_write_access(handle, iloc->bh);
3139 if (err) {
3140 brelse(iloc->bh);
3141 iloc->bh = NULL;
3142 }
3143 }
3144 }
3145 ext3_std_error(inode->i_sb, err);
3146 return err;
3147}
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3171{
3172 struct ext3_iloc iloc;
3173 int err;
3174
3175 might_sleep();
3176 err = ext3_reserve_inode_write(handle, inode, &iloc);
3177 if (!err)
3178 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3179 return err;
3180}
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196void ext3_dirty_inode(struct inode *inode)
3197{
3198 handle_t *current_handle = ext3_journal_current_handle();
3199 handle_t *handle;
3200
3201 handle = ext3_journal_start(inode, 2);
3202 if (IS_ERR(handle))
3203 goto out;
3204 if (current_handle &&
3205 current_handle->h_transaction != handle->h_transaction) {
3206
3207 printk(KERN_EMERG "%s: transactions do not match!\n",
3208 __FUNCTION__);
3209 } else {
3210 jbd_debug(5, "marking dirty. outer handle=%p\n",
3211 current_handle);
3212 ext3_mark_inode_dirty(handle, inode);
3213 }
3214 ext3_journal_stop(handle);
3215out:
3216 return;
3217}
3218
3219#if 0
3220
3221
3222
3223
3224
3225
3226
/*
 * Dead code (compiled out by the surrounding #if 0, kept for
 * reference): pin an inode's buffer in the current transaction by
 * taking write access and immediately journalling it as dirty
 * metadata.
 */
static int ext3_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext3_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext3_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
3246#endif
3247
/*
 * ext3_change_inode_journal_flag - switch an inode into or out of
 * data=journal mode (@val nonzero enables it).
 *
 * The journal is quiesced and flushed around the flag flip so no
 * transaction straddles the mode change, then the new flag value is
 * committed synchronously.
 */
int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * Block all journal updates and flush everything out, so the
	 * address-space operations can be swapped while no I/O is in
	 * flight against the old mode.
	 */
	journal = EXT3_JOURNAL(inode);
	if (is_journal_aborted(journal))
		return -EROFS;

	journal_lock_updates(journal);
	journal_flush(journal);

	/*
	 * Flip the journalling flag and install the matching
	 * address_space operations while updates are still locked out.
	 */
	if (val)
		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
	else
		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
	ext3_set_aops(inode);

	journal_unlock_updates(journal);

	/* Persist the flag change, synchronously (h_sync). */
	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext3_journal_stop(handle);
	ext3_std_error(inode->i_sb, err);

	return err;
}
3300