1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/module.h>
26#include <linux/fs.h>
27#include <linux/time.h>
28#include <linux/ext4_jbd2.h>
29#include <linux/jbd2.h>
30#include <linux/highuid.h>
31#include <linux/pagemap.h>
32#include <linux/quotaops.h>
33#include <linux/string.h>
34#include <linux/buffer_head.h>
35#include <linux/writeback.h>
36#include <linux/mpage.h>
37#include <linux/uio.h>
38#include <linux/bio.h>
39#include "xattr.h"
40#include "acl.h"
41
42
43
44
45static int ext4_inode_is_fast_symlink(struct inode *inode)
46{
47 int ea_blocks = EXT4_I(inode)->i_file_acl ?
48 (inode->i_sb->s_blocksize >> 9) : 0;
49
50 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
51}
52
53
54
55
56
57
58
59
60
61
/*
 * ext4_forget() - drop a buffer's journal state when its block is freed.
 *
 * We have to choose between jbd2's "forget" (simply stop journalling the
 * buffer) and "revoke" (write a revoke record so a stale journalled copy
 * of the block can never be replayed over reused data).
 *
 * @handle:      active journal handle
 * @is_metadata: non-zero if the block holds filesystem metadata
 * @inode:       owning inode (used to determine the data journalling mode)
 * @bh:          buffer for the block being freed; may be NULL, in which
 *               case only a revoke against @blocknr is possible
 * @blocknr:     physical block number being freed
 *
 * Returns 0 on success or a jbd2 error code; aborts the filesystem if the
 * revoke itself fails.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/*
	 * No revoke is needed if the whole filesystem journals its data, or
	 * if this is a data block of an inode that does not journal data:
	 * in those cases the block cannot exist as stale journalled
	 * metadata, so a plain "forget" suffices.
	 */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * Otherwise (metadata, or journalled data) issue a revoke record so
	 * journal replay can never resurrect the old contents after the
	 * block is reused.
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
101
102
103
104
105
106static unsigned long blocks_for_truncate(struct inode *inode)
107{
108 unsigned long needed;
109
110 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
111
112
113
114
115
116
117
118 if (needed < 2)
119 needed = 2;
120
121
122
123 if (needed > EXT4_MAX_TRANS_DATA)
124 needed = EXT4_MAX_TRANS_DATA;
125
126 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
127}
128
129
130
131
132
133
134
135
136
137
138
139static handle_t *start_transaction(struct inode *inode)
140{
141 handle_t *result;
142
143 result = ext4_journal_start(inode, blocks_for_truncate(inode));
144 if (!IS_ERR(result))
145 return result;
146
147 ext4_std_error(inode->i_sb, PTR_ERR(result));
148 return result;
149}
150
151
152
153
154
155
156
157static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
158{
159 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160 return 0;
161 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162 return 0;
163 return 1;
164}
165
166
167
168
169
170
/*
 * Restart the running handle with a fresh credit reservation sized for
 * the truncate.  NOTE(review): a restart commits the current transaction;
 * callers must hold no state that the commit would invalidate — confirm
 * at call sites.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}
176
177
178
179
/*
 * Called when the last reference to an unlinked inode goes away: free
 * its page cache and data blocks, remove it from the orphan list, and
 * release the on-disk inode.
 */
void ext4_delete_inode (struct inode * inode)
{
	handle_t *handle;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * We cannot journal anything, but we still need to get the
		 * inode off the in-core orphan list (a NULL handle tells
		 * ext4_orphan_del to skip the on-disk update).
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;	/* commit synchronously for sync inodes */
	inode->i_size = 0;
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * The truncate is complete (or the inode had no blocks); the inode
	 * can now leave the orphan list and be marked as deleted with a
	 * dtime stamp.
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything goes wrong while
	 * writing the inode out we must not free it — a crashed later
	 * mount could then see a live inode pointing at freed blocks.
	 * So only free the inode if marking it dirty succeeded.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core cleanup. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
233
/*
 * One step in an indirect-block lookup chain:
 *  @p   - address of the block-number slot inside the parent (the inode's
 *         i_data array, or an indirect block's payload)
 *  @key - the value *p held when the chain was built (used by
 *         verify_chain() to detect concurrent truncation)
 *  @bh  - buffer holding the parent block, or NULL when the parent is
 *         the inode itself
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
239
240static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
241{
242 p->key = *(p->p = v);
243 p->bh = bh;
244}
245
246static int verify_chain(Indirect *from, Indirect *to)
247{
248 while (from <= to && from->key == *from->p)
249 from++;
250 return (from > to);
251}
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
/*
 * ext4_block_to_path - parse a logical block number into a lookup path.
 *
 * Splits @i_block into up to four array indices in @offsets[]: the slot
 * in the inode's i_data, then (for deeper files) slots inside the
 * single/double/triple indirect blocks.
 *
 * Returns the chain depth (0 on a bad block number, after issuing a
 * warning).  If @boundary is non-NULL it receives the number of further
 * blocks that can be mapped before the last-level indirect block runs
 * out — i.e. how far a contiguous mapping may extend.
 */
static int ext4_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		/* Direct block: a single index into i_data. */
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		/* Single indirect. */
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		/* Double indirect: split the remainder into two indices. */
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		/* Triple indirect: three levels of indices. */
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/*
 * ext4_get_branch - walk the indirect chain for a block lookup.
 *
 * Follows the @depth offsets produced by ext4_block_to_path(), filling
 * @chain[] with one Indirect per level.
 *
 * Returns NULL if the whole chain resolved (the block exists; its number
 * is in chain[depth-1].key).  Otherwise returns a pointer to the last
 * complete Indirect: *@err == 0 means a hole was found at that level,
 * -EIO means a read failure, -EAGAIN means the chain changed under us
 * (concurrent truncate) and the lookup should be retried.
 *
 * Buffers referenced by the returned partial chain are held; the caller
 * must brelse() them.
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* The first step reads straight out of the in-core inode. */
	add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader/writer sync: re-validate before trusting the data. */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* A zero key means a hole at this level. */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
/*
 * ext4_find_near - pick an allocation goal close to existing data.
 *
 * Heuristic, tried in order:
 *  1. the most recently allocated block pointer preceding @ind in the
 *     same indirect block (or in i_data),
 *  2. the indirect block itself,
 *  3. a spot inside the inode's block group, coloured by PID so that
 *     concurrent writers spread out within the group.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
435
436
437
438
439
440
441
442
443
444
445
446
447
448static ext4_fsblk_t ext4_find_goal(struct inode *inode, long block,
449 Indirect chain[4], Indirect *partial)
450{
451 struct ext4_block_alloc_info *block_i;
452
453 block_i = EXT4_I(inode)->i_block_alloc_info;
454
455
456
457
458
459 if (block_i && (block == block_i->last_alloc_logical_block + 1)
460 && (block_i->last_alloc_physical_block != 0)) {
461 return block_i->last_alloc_physical_block + 1;
462 }
463
464 return ext4_find_near(inode, partial);
465}
466
467
468
469
470
471
472
473
474
475
476
477
478
/*
 * ext4_blks_to_allocate - decide how many blocks to request at once.
 *
 * @branch: the partial chain (branch[0] is the deepest resolved level)
 * @k: number of missing indirect levels still to allocate
 * @blks: number of data blocks the caller would like mapped
 * @blocks_to_boundary: blocks left before the indirect-block boundary
 *
 * If indirect blocks are needed (@k > 0) we simply request up to the
 * boundary.  Otherwise we can also look ahead in the existing indirect
 * block and stop at the first slot that is already in use.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case, [t,d]Indirect block(s) has not allocated yet
	 * then it's clear blocks on that path have not allocated
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	/* Extend as long as the next pointer slots are still holes. */
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
504
505
506
507
508
509
510
511
512
513
514
/*
 * ext4_alloc_blocks - allocate the indirect blocks plus a run of data
 * blocks for one mapping request.
 *
 * @indirect_blks: number of missing indirect blocks to allocate
 * @blks: number of data blocks requested
 * @new_blocks[]: output — one entry per indirect block, then the first
 *                data block of the final contiguous run
 *
 * Returns the number of data blocks obtained (>= 1 on success) and
 * clears *@err; on failure frees anything already allocated and returns
 * 0 with *@err set.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, int indirect_blks, int blks,
			ext4_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.  The allocator may return fewer blocks
	 * than asked for, so loop: earlier blocks in each extent satisfy
	 * the (single-block) indirect needs first, and the final extent
	 * provides the contiguous data run.
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		/* Blocks left over after the indirects are the data run. */
		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	/* Undo: release every indirect block already taken. */
	for (i = 0; i <index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
592 int indirect_blks, int *blks, ext4_fsblk_t goal,
593 int *offsets, Indirect *branch)
594{
595 int blocksize = inode->i_sb->s_blocksize;
596 int i, n = 0;
597 int err = 0;
598 struct buffer_head *bh;
599 int num;
600 ext4_fsblk_t new_blocks[4];
601 ext4_fsblk_t current_block;
602
603 num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
604 *blks, new_blocks, &err);
605 if (err)
606 return err;
607
608 branch[0].key = cpu_to_le32(new_blocks[0]);
609
610
611
612 for (n = 1; n <= indirect_blks; n++) {
613
614
615
616
617
618 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
619 branch[n].bh = bh;
620 lock_buffer(bh);
621 BUFFER_TRACE(bh, "call get_create_access");
622 err = ext4_journal_get_create_access(handle, bh);
623 if (err) {
624 unlock_buffer(bh);
625 brelse(bh);
626 goto failed;
627 }
628
629 memset(bh->b_data, 0, blocksize);
630 branch[n].p = (__le32 *) bh->b_data + offsets[n];
631 branch[n].key = cpu_to_le32(new_blocks[n]);
632 *branch[n].p = branch[n].key;
633 if ( n == indirect_blks) {
634 current_block = new_blocks[n];
635
636
637
638
639
640 for (i=1; i < num; i++)
641 *(branch[n].p + i) = cpu_to_le32(++current_block);
642 }
643 BUFFER_TRACE(bh, "marking uptodate");
644 set_buffer_uptodate(bh);
645 unlock_buffer(bh);
646
647 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
648 err = ext4_journal_dirty_metadata(handle, bh);
649 if (err)
650 goto failed;
651 }
652 *blks = num;
653 return err;
654failed:
655
656 for (i = 1; i <= n ; i++) {
657 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
658 ext4_journal_forget(handle, branch[i].bh);
659 }
660 for (i = 0; i <indirect_blks; i++)
661 ext4_free_blocks(handle, inode, new_blocks[i], 1);
662
663 ext4_free_blocks(handle, inode, new_blocks[i], num);
664
665 return err;
666}
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
/*
 * ext4_splice_branch - link a freshly built branch into the inode's tree.
 *
 * @where points at the slot (in the inode or in an existing indirect
 * block) where the new branch attaches; @num is the number of new
 * indirect blocks in the branch, @blks the number of new data blocks.
 *
 * This is the commit point of the allocation: until *where->p is set the
 * new blocks are invisible.  On error the whole branch is unwound.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext4_block_alloc_info *block_i;
	ext4_fsblk_t current_block;

	block_i = EXT4_I(inode)->i_block_alloc_info;
	/*
	 * If we are splicing into an existing indirect block we need
	 * journal write access to it before modifying its contents.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * When the branch contains no new indirect blocks (num == 0) the
	 * data run is stored directly here: fill in the remaining
	 * contiguous block numbers.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * Update the reservation-window hint so a subsequent sequential
	 * append continues right after this allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being
		 * spliced onto an indirect block at the very end of the
		 * file (the file is growing) then we *will* alter the
		 * inode to reflect the new i_size.  But that is not done
		 * here - it is done in generic_commit_write->__mark_inode_dirty.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* Unwind: forget the new indirect buffers and free all new blocks. */
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
/*
 * ext4_get_blocks_handle - map logical blocks of an indirect-style inode
 * to physical blocks, allocating if requested.
 *
 * @iblock/@maxblocks: logical range to map
 * @bh_result: receives the mapping (and the "new"/"boundary" flags)
 * @create: if non-zero, allocate blocks for holes
 * @extend_disksize: if non-zero, push i_disksize up to i_size after a
 *                   successful allocation
 *
 * Returns the number of contiguous blocks mapped (> 0), 0 when the block
 * is a hole and @create is 0, or a negative error.  Allocation runs
 * under ei->truncate_mutex to serialise against truncate.
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;

	/* Extent-mapped inodes are handled elsewhere. */
	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* Map as many following physically-contiguous blocks as we can. */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			if (!verify_chain(chain, partial)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 *
				 * NOTE(review): partial is NULL on this
				 * path, so verify_chain(chain, NULL) checks
				 * an empty range — confirm this race check
				 * is effective as written.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);

	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext4_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we took the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext4_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			/* Someone else mapped it while we waited. */
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	goal = ext4_find_goal(inode, iblock, chain, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the totoal number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by truncate_mutex.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
942
943#define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
944
/*
 * ext4_get_block - get_block_t callback used by the page cache and
 * direct I/O paths.
 *
 * For multi-block create requests (the DIO path) this may stop and
 * restart the running journal handle to avoid holding credits across a
 * transaction commit, or to refill credits that have run low.
 *
 * NOTE(review): handle is dereferenced without a NULL check when
 * create != 0 and max_blocks > 1 — confirm all such callers run inside
 * a transaction.
 */
static int ext4_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (!create)
		goto get_block;		/* A read */

	if (max_blocks == 1)
		goto get_block;		/* A single block get_block */

	if (handle->h_transaction->t_state == T_LOCKED) {
		/*
		 * Huge direct-io writes can hold off commits for long
		 * periods of time.  Let this commit run.
		 */
		ext4_journal_stop(handle);
		handle = ext4_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		goto get_block;
	}

	if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
		/*
		 * Getting low on buffer credits...
		 */
		ret = ext4_journal_extend(handle, DIO_CREDITS);
		if (ret > 0) {
			/*
			 * Couldn't extend the transaction.  Start a new one.
			 */
			ret = ext4_journal_restart(handle, DIO_CREDITS);
		}
	}

get_block:
	if (ret == 0) {
		ret = ext4_get_blocks_wrap(handle, inode, iblock,
					max_blocks, bh_result, create, 0);
		if (ret > 0) {
			/* Report how many bytes were actually mapped. */
			bh_result->b_size = (ret << inode->i_blkbits);
			ret = 0;
		}
	}
	return ret;
}
994
995
996
997
/*
 * ext4_getblk - map one logical block and return its buffer_head.
 *
 * Performs a single-block mapping via a dummy buffer_head, then fetches
 * the real buffer with sb_getblk().  Freshly allocated blocks are zeroed
 * under journal protection before being handed back.
 *
 * Returns the buffer (caller must brelse), or NULL with *@errp set on
 * error, or NULL with *@errp == 0 for a hole when @create is 0.
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext4_get_blocks_wrap(handle, inode, block, 1,
					&dummy, create, 1);
	/*
	 * ext4_get_blocks_wrap() returns the number of blocks mapped;
	 * for a 1-block request anything other than 0 or 1 is unexpected.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				/* Zero a newly allocated block before use. */
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
			err = ext4_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
1064
1065struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1066 int block, int create, int *err)
1067{
1068 struct buffer_head * bh;
1069
1070 bh = ext4_getblk(handle, inode, block, create, err);
1071 if (!bh)
1072 return bh;
1073 if (buffer_uptodate(bh))
1074 return bh;
1075 ll_rw_block(READ_META, 1, &bh);
1076 wait_on_buffer(bh);
1077 if (buffer_uptodate(bh))
1078 return bh;
1079 put_bh(bh);
1080 *err = -EIO;
1081 return NULL;
1082}
1083
/*
 * walk_page_buffers - apply @fn to every buffer of a page that overlaps
 * the byte range [@from, @to).
 *
 * Buffers wholly outside the range are skipped; if any skipped buffer is
 * not uptodate and @partial is non-NULL, *@partial is set to 1 (the page
 * cannot be marked fully uptodate).  Returns the first non-zero value
 * @fn produced, but keeps walking the remaining buffers regardless.
 */
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;	/* circular list: stop back at head */
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141static int do_journal_get_write_access(handle_t *handle,
1142 struct buffer_head *bh)
1143{
1144 if (!buffer_mapped(bh) || buffer_freed(bh))
1145 return 0;
1146 return ext4_journal_get_write_access(handle, bh);
1147}
1148
/*
 * ext4_write_begin - ->write_begin for all three ext4 data modes.
 *
 * Grabs/locks the target page, starts a journal handle sized for a page
 * write, runs block_write_begin() to map/allocate buffers, and (in
 * data=journal mode) takes journal write access on the affected buffers.
 * Retries once more if allocation failed with ENOSPC and a commit might
 * free space.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	/* The page must be held locked while the handle is open. */
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		/* On failure release everything; write_end won't run. */
		ext4_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
1198
/*
 * Mark a data buffer dirty within the current transaction (ordered-mode
 * data), aborting the handle if jbd2 reports an error.
 */
int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = jbd2_journal_dirty_data(handle, bh);
	if (err)
		ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
						bh, handle, err);
	return err;
}
1207
1208
1209static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1210{
1211 if (!buffer_mapped(bh) || buffer_freed(bh))
1212 return 0;
1213 set_buffer_uptodate(bh);
1214 return ext4_journal_dirty_metadata(handle, bh);
1215}
1216
1217
1218
1219
1220
1221
1222
/*
 * Common tail of the write_end paths: commit the copied bytes via
 * block_write_end() and, if the write extended the file, update i_size
 * and mark the inode dirty.  Returns the number of bytes committed.
 */
static int ext4_generic_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	struct inode *inode = file->f_mapping->host;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	return copied;
}
1239
1240
1241
1242
1243
1244
1245
1246
/*
 * ->write_end for data=ordered: attach the written data buffers to the
 * running transaction (so they are flushed before metadata commits),
 * push i_disksize forward, commit the copy, then stop the handle and
 * release the page taken by write_begin.
 */
static int ext4_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext4_journal_dirty_data);

	if (ret == 0) {
		/*
		 * generic_commit_write() will run mark_inode_dirty() if
		 * i_size changes.  So let's piggyback the i_disksize mark
		 * writes into the running transaction here.
		 */
		loff_t new_i_size;

		new_i_size = pos + copied;
		if (new_i_size > EXT4_I(inode)->i_disksize)
			EXT4_I(inode)->i_disksize = new_i_size;
		copied = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		if (copied < 0)
			ret = copied;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1287
/*
 * ->write_end for data=writeback: no ordering of data is required, so
 * just advance i_disksize, commit the copy, stop the handle, and release
 * the page taken by write_begin.
 */
static int ext4_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = new_i_size;

	copied = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	if (copied < 0)
		ret = copied;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1315
/*
 * ->write_end for data=journal: journal each written buffer as metadata
 * (via write_end_fn), zero any short-copy tail, update i_size/i_disksize
 * under the handle, and flag the inode so ext4_bmap() knows journalled
 * data may need flushing.
 */
static int ext4_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		/* Short copy: the uncopied tail of new buffers must be zeroed. */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (inode->i_size > EXT4_I(inode)->i_disksize) {
		EXT4_I(inode)->i_disksize = inode->i_size;
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
/*
 * ext4_bmap - ->bmap: report the physical block backing a logical block.
 *
 * If the inode has journalled data pending (EXT4_STATE_JDATA), the block
 * numbers reported by bmap would not be meaningful until that data has
 * been written back, so force a full journal flush first.  Used e.g. by
 * swap and LILO-style boot loaders that read blocks directly.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext4_get_block);
}
1410
/* walk_page_buffers() callback: take an extra reference on the buffer. */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}
1416
/* walk_page_buffers() callback: drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
1422
1423static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1424{
1425 if (buffer_mapped(bh))
1426 return ext4_journal_dirty_data(handle, bh);
1427 return 0;
1428}
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
/*
 * ->writepage for data=ordered: write the page via
 * block_write_full_page() and then file its buffers as ordered data in
 * a transaction, so the data reaches disk before the commit.
 *
 * References on the buffers are taken (bget_one) across the write so
 * they cannot disappear before we file them, and dropped afterwards.
 * Recursion into the filesystem (a handle already active on this task)
 * is refused by redirtying the page.
 */
static int ext4_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext4_journal_current_handle())
		goto out_fail;

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext4_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 *
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, jbd2_journal_dirty_data_fn);
		if (!ret)	/* always true here; keeps the error pattern */
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1547
/*
 * ->writepage for data=writeback: just write the page inside a handle
 * (block allocation may be needed); no data ordering.  Reentry with an
 * active handle is refused by redirtying the page.
 */
static int ext4_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext4_journal_current_handle())
		goto out_fail;

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	/* nobh mode avoids attaching buffer_heads when possible. */
	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, ext4_get_block, wbc);
	else
		ret = block_write_full_page(page, ext4_get_block, wbc);

	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
1580
/*
 * ->writepage for data=journal.
 *
 * A page that was dirtied while journalled (PageChecked set by
 * ext4_journalled_set_page_dirty, or with no buffers yet) gets the full
 * prepare/commit treatment: its buffers are journalled as metadata.  A
 * page whose buffers were already journalled can be written out
 * normally — jbd2 keeps it consistent.
 */
static int ext4_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext4_journal_current_handle())
		goto no_write;

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext4_get_block);
		if (ret != 0) {
			ext4_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext4_get_block, wbc);
	}
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
1639
/* ->readpage: plain mpage read using ext4's block mapper. */
static int ext4_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext4_get_block);
}
1644
/* ->readpages: batched readahead via mpage using ext4's block mapper. */
static int
ext4_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
1651
/*
 * ->invalidatepage: let jbd2 detach journalled buffers.  A full-page
 * invalidation also clears PageChecked, since no journalled data from
 * this page will ever be written.
 */
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	jbd2_journal_invalidatepage(journal, page, offset);
}
1664
/*
 * ->releasepage: ask jbd2 whether the page's buffers can be freed.
 * Pages still flagged PageChecked (journalled-dirty) should not reach
 * here — hence the WARN_ON.
 */
static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}
1674
1675
1676
1677
1678
1679
1680
1681
1682
/*
 * ext4_direct_IO - ->direct_IO for ext4.
 *
 * For writes that may extend the file, the inode is put on the orphan
 * list first so that a crash mid-write cannot leave allocated blocks
 * past i_size.  After the transfer, i_size/i_disksize are fixed up and
 * the orphan record is removed.
 *
 * If we allocate blocks and the transfer then fails, we still leak the
 * allocated-but-unwritten blocks within i_size; they are reclaimed on
 * the next truncate or unlink.
 */
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle = NULL;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		handle = ext4_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		if (final_size > inode->i_size) {
			/* Credit reservation covers the orphan-list update. */
			ret = ext4_orphan_add(handle, inode);
			if (ret)
				goto out_stop;
			orphan = 1;
			ei->i_disksize = inode->i_size;
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL);

	/*
	 * Reacquire the handle: ext4_get_block() can restart the
	 * transaction, so the handle we started above may no longer be
	 * the current one.
	 */
	handle = ext4_journal_current_handle();

out_stop:
	if (handle) {
		int err;

		if (orphan && inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (orphan && ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
1749
/*
 * set_page_dirty for the journalled-data case.  We must not dirty the
 * page's buffers directly - the data has to go through the journal first.
 * Instead, mark the page Checked (consumed by the writepage and
 * invalidatepage paths above) and dirty the page without its buffers.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
1768
/* Address-space operations for data=ordered mode */
static const struct address_space_operations ext4_ordered_aops = {
	.readpage	= ext4_readpage,
	.readpages	= ext4_readpages,
	.writepage	= ext4_ordered_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext4_write_begin,
	.write_end	= ext4_ordered_write_end,
	.bmap		= ext4_bmap,
	.invalidatepage	= ext4_invalidatepage,
	.releasepage	= ext4_releasepage,
	.direct_IO	= ext4_direct_IO,
	.migratepage	= buffer_migrate_page,
};
1782
/* Address-space operations for data=writeback mode */
static const struct address_space_operations ext4_writeback_aops = {
	.readpage	= ext4_readpage,
	.readpages	= ext4_readpages,
	.writepage	= ext4_writeback_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext4_write_begin,
	.write_end	= ext4_writeback_write_end,
	.bmap		= ext4_bmap,
	.invalidatepage	= ext4_invalidatepage,
	.releasepage	= ext4_releasepage,
	.direct_IO	= ext4_direct_IO,
	.migratepage	= buffer_migrate_page,
};
1796
/*
 * Address-space operations for data=journal mode.  Note: no direct_IO or
 * migratepage here, and set_page_dirty is overridden to keep buffers clean.
 */
static const struct address_space_operations ext4_journalled_aops = {
	.readpage	= ext4_readpage,
	.readpages	= ext4_readpages,
	.writepage	= ext4_journalled_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext4_write_begin,
	.write_end	= ext4_journalled_write_end,
	.set_page_dirty	= ext4_journalled_set_page_dirty,
	.bmap		= ext4_bmap,
	.invalidatepage	= ext4_invalidatepage,
	.releasepage	= ext4_releasepage,
};
1809
1810void ext4_set_aops(struct inode *inode)
1811{
1812 if (ext4_should_order_data(inode))
1813 inode->i_mapping->a_ops = &ext4_ordered_aops;
1814 else if (ext4_should_writeback_data(inode))
1815 inode->i_mapping->a_ops = &ext4_writeback_aops;
1816 else
1817 inode->i_mapping->a_ops = &ext4_journalled_aops;
1818}
1819
/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.  This is required
 * during truncate: we need to physically zero the tail end of that block so
 * it doesn't yield old data if the file is later grown.
 *
 * The page must be locked on entry; it is unlocked and released on exit.
 */
int ext4_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	/* NOTE(review): iblock is only `unsigned'; confirm the logical block
	 * number of the truncation point always fits - TODO verify bound. */
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For "nobh" writeback mode, an uptodate page with no buffers can be
	 * zeroed and dirtied directly, with no journalling needed.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user_page(page, offset, length, KM_USER0);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* still unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user_page(page, offset, length, KM_USER0);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext4_should_journal_data(inode)) {
		err = ext4_journal_dirty_metadata(handle, bh);
	} else {
		if (ext4_should_order_data(inode))
			err = ext4_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
1917
1918
1919
1920
1921
1922
1923static inline int all_zeroes(__le32 *p, __le32 *q)
1924{
1925 while (p < q)
1926 if (*p++)
1927 return 0;
1928 return 1;
1929}
1930
/*
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode:   inode in question
 * @depth:   depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain:   place to store the pointers to partial indirect blocks
 * @top:     place to put the value of the detached top-of-subtree pointer
 *
 * Helper for ext4_truncate().  Walks the branch containing the truncation
 * boundary and finds the deepest indirect block shared between the part of
 * the file being kept and the part being removed.  Returns a pointer into
 * @chain where the caller must start freeing; *@top receives the pointer
 * value of the unshared subtree to detach (left 0 when there is none).
 *
 * The caller inherits the buffer_head references held in chain[] up to and
 * including the returned element.
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
			int offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* ext4_get_branch() returns NULL when the whole branch is present */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* nothing of ours past this point */
		goto no_top;
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Don't zero the pointer here - the on-disk tree must stay
		 * intact until the detached subtree is actually freed. */
#if 0
		*p->p = 0;
#endif
	}

	/* Drop references we no longer need; caller keeps chain..p */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
2013
/*
 * ext4_clear_blocks - clear a contiguous run of block pointers and free
 * the blocks they referenced.
 * @handle:        journal handle
 * @inode:         inode we are dealing with
 * @bh:            buffer containing the pointers (NULL when they live in
 *                 the inode itself)
 * @block_to_free: first physical block of the run being freed
 * @count:         number of blocks in the run
 * @first:         first pointer to clear
 * @last:          one past the last pointer to clear
 *
 * If the current transaction is nearly full, flush the dirty state and
 * restart the transaction (re-taking write access on @bh) before freeing.
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
			ext4_journal_dirty_metadata(handle, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory.  We find
	 * them on the hash table so ext4_forget() can pass them to the
	 * journal's forget/revoke machinery.  We've already cleared each
	 * pointer from the file, so the data blocks can no longer be reached
	 * through this inode.
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			ext4_forget(handle, 0, inode, bh, nr);
		}
	}

	ext4_free_blocks(handle, inode, block_to_free, count);
}
2061
/*
 * ext4_free_data - free a list of data blocks
 * @handle:  handle for this transaction
 * @inode:   inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 *           (NULL when the pointers live in the inode itself)
 * @first:   array of block numbers (little-endian 32-bit on disk)
 * @last:    points immediately past the end of array
 *
 * We accumulate contiguous runs of blocks and free each run with a single
 * call: contiguous blocks usually share bitmap/group-descriptor blocks, so
 * batching keeps the journalled metadata churn small.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* starting block # of a run */
	unsigned long count = 0;		/* number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* pointer into inode/indirect
						 * block for block_to_free */
	ext4_fsblk_t nr;			/* current block # */
	__le32 *p;				/* pointer for current block */

	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* run broken - flush it and start a new one */
				ext4_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final run, if any */
	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
		ext4_journal_dirty_metadata(handle, this_bh);
	}
}
2134
/*
 * ext4_free_branches - free an array of branches
 * @handle:    JBD handle for this transaction
 * @inode:     inode we are dealing with
 * @parent_bh: buffer_head which contains *@first and *@last
 *             (NULL when the pointers live in the inode itself)
 * @first:     array of block numbers
 * @last:      pointer immediately past the end of array
 * @depth:     depth of the branches to free
 *
 * Recursively frees entire subtrees of the indirect block tree, bottom up
 * and right to left.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure?  Report the error and pull this
			 * branch out of the tree (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32*)bh->b_data,
					   (__le32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled this indirect block
			 * several times during the truncate.  It's no longer
			 * needed, so drop it from the transaction: is_metadata
			 * is 1 here, so ext4_forget() takes the revoke path,
			 * and the revoke record is emitted *before* the
			 * block's bit is cleared in the bitmaps - ordering
			 * that crash recovery depends upon.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been released.
			 * Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it, so make some room in
			 * the transaction if it is nearly full.
			 *
			 * The parent pointer is zeroed *after* the pointee is
			 * freed in the bitmaps; if the restart splits the two
			 * across transactions, recovery merely complains
			 * about releasing a free block rather than leaking.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_journal_test_restart(handle, inode);
			}

			ext4_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block we just freed is pointed to by
				 * an indirect block: clear that pointer and
				 * journal the parent.
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_journal_dirty_metadata");
					ext4_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We've reached the bottom of the tree: free data blocks */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
2256
/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent
 * on disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory, but whenever we
 * close off and commit a journal transaction, the contents of (the
 * filesystem + the journal) must be consistent and restartable.  It's pretty
 * simple, really: bottom up, right to left.
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, orphan cleanup will see that the
 * truncate did not complete and will call ext4_truncate() again, finding and
 * releasing any blocks still linked past the truncation point.
 */
void ext4_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	/* NOTE(review): `long' may be narrower than the largest logical block
	 * number on 32-bit builds - confirm the supported file-size bound. */
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	/* Only regular files, directories and symlinks carry block trees */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext4_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	/*
	 * We have to lock the EOF page here, because lock_page() nests
	 * outside jbd2_journal_start().
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
	}

	/* Extent-mapped files take a completely different path */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_truncate(inode, page);

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;		/* truncate() returns void: nothing to report */
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext4_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to
	 * propagate the new, shorter inode size (held for now in i_size)
	 * into the on-disk inode.  We do this via i_disksize, which is the
	 * value which ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

	if (n == 1) {		/* direct blocks only */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}

	ext4_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then the delete-inode path will
	 * clean up the orphan info for us instead.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
}
2459
/*
 * ext4_get_inode_block - locate the filesystem block holding an inode
 * @sb:   superblock
 * @ino:  inode number
 * @iloc: out: the inode's block group and within-block byte offset
 *
 * Returns the block number containing @ino's on-disk inode, or 0 on error.
 */
static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
		unsigned long ino, struct ext4_iloc *iloc)
{
	unsigned long desc, group_desc, block_group;
	unsigned long offset;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_group_desc * gdp;

	if (!ext4_valid_inum(sb, ino)) {
		/*
		 * Invalid inode numbers are normally caught earlier; they can
		 * still reach here (e.g. via a stale filehandle), in which
		 * case we just fail the lookup quietly.
		 */
		return 0;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	if (block_group >= EXT4_SB(sb)->s_groups_count) {
		ext4_error(sb,"ext4_get_inode_block","group >= groups count");
		return 0;
	}
	smp_rmb();
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	bh = EXT4_SB(sb)->s_group_desc[group_desc];
	if (!bh) {
		ext4_error (sb, "ext4_get_inode_block",
			    "Descriptor not loaded");
		return 0;
	}

	gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
		desc * EXT4_DESC_SIZE(sb));
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
		EXT4_INODE_SIZE(sb);
	block = ext4_inode_table(sb, gdp) +
		(offset >> EXT4_BLOCK_SIZE_BITS(sb));

	iloc->block_group = block_group;
	iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
	return block;
}
2507
/*
 * __ext4_get_inode_loc - map an inode to its on-disk buffer
 *
 * On success, iloc->bh holds the (uptodate) buffer containing the inode.
 * If @in_mem is set, we may avoid reading the block from disk when we can
 * prove it contains nothing else of value (all sibling inodes free).
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	ext4_fsblk_t block;
	struct buffer_head *bh;

	block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext4_error (inode->i_sb, "ext4_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block=%llu",
				 inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext4_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT4_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT4_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT4_INODES_PER_GROUP(inode->i_sb));
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext4_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
				ext4_inode_bitmap(inode->i_sb, desc));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads
			 * instead of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			/* Is any sibling inode in this block in use? */
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * There are other valid inodes in the buffer, or we could
		 * not prove otherwise: read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(inode->i_sb, "ext4_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block=%llu",
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
2620
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode's data except xattrs in memory here */
	return __ext4_get_inode_loc(inode, iloc,
		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
}
2627
2628void ext4_set_inode_flags(struct inode *inode)
2629{
2630 unsigned int flags = EXT4_I(inode)->i_flags;
2631
2632 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2633 if (flags & EXT4_SYNC_FL)
2634 inode->i_flags |= S_SYNC;
2635 if (flags & EXT4_APPEND_FL)
2636 inode->i_flags |= S_APPEND;
2637 if (flags & EXT4_IMMUTABLE_FL)
2638 inode->i_flags |= S_IMMUTABLE;
2639 if (flags & EXT4_NOATIME_FL)
2640 inode->i_flags |= S_NOATIME;
2641 if (flags & EXT4_DIRSYNC_FL)
2642 inode->i_flags |= S_DIRSYNC;
2643}
2644
2645
2646void ext4_get_inode_flags(struct ext4_inode_info *ei)
2647{
2648 unsigned int flags = ei->vfs_inode.i_flags;
2649
2650 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2651 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2652 if (flags & S_SYNC)
2653 ei->i_flags |= EXT4_SYNC_FL;
2654 if (flags & S_APPEND)
2655 ei->i_flags |= EXT4_APPEND_FL;
2656 if (flags & S_IMMUTABLE)
2657 ei->i_flags |= EXT4_IMMUTABLE_FL;
2658 if (flags & S_NOATIME)
2659 ei->i_flags |= EXT4_NOATIME_FL;
2660 if (flags & S_DIRSYNC)
2661 ei->i_flags |= EXT4_DIRSYNC_FL;
2662}
2663
/*
 * Read an inode from disk into the in-core VFS inode, setting up the
 * inode/file operations appropriate for its type.  On failure the inode
 * is marked bad.
 */
void ext4_read_inode(struct inode * inode)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh;
	int block;

#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	ei->i_acl = EXT4_ACL_NOT_CACHED;
	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
#endif
	ei->i_block_alloc_info = NULL;

	if (__ext4_get_inode_loc(inode, &iloc, 0))
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		/* 32-bit uids/gids: merge in the high 16 bits */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * We now have enough fields to check whether the inode was active.
	 * This is needed because callers (e.g. via stale NFS filehandles)
	 * might try to access deleted inodes; the exception is unlinked
	 * inodes being processed by orphan recovery.
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have a valid
		 * i_mode and are being read by the orphan recovery code,
		 * which is about to finish deleting them. */
	}
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		/* non-HURD filesystems store the high 16 bits separately */
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	if (!S_ISREG(inode->i_mode)) {
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		/* regular files: i_size_high extends i_size past 4GB */
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 &&
	    EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		/*
		 * Large inodes carry an i_extra_isize telling how much of
		 * the extra space past the old 128-byte inode is in use.
		 * Validate it before trusting it.
		 */
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			brelse (bh);
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			/* in-inode xattrs begin right after the extra area */
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ei->i_state |= EXT4_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	/* Wire up type-specific operations */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode))
			inode->i_op = &ext4_fast_symlink_inode_operations;
		else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (iloc.bh);
	ext4_set_inode_flags(inode);
	return;

bad_inode:
	make_bad_inode(inode);
	return;
}
2799
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the buffer_head
 * in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels.  Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if(!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	/* i_disksize, not i_size, is what goes to disk */
	raw_inode->i_size = cpu_to_le32(ei->i_disksize);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
		raw_inode->i_size_high =
			cpu_to_le32(ei->i_disksize >> 32);
		if (ei->i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT4_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT4_GOOD_OLD_REV)) {
				/* If this is the first large file created,
				 * add the LARGE_FILE flag to the superblock
				 * (synchronously, so it hits disk first). */
				err = ext4_journal_get_write_access(handle,
						EXT4_SB(sb)->s_sbh);
				if (err)
					goto out_brelse;
				ext4_update_dynamic_rev(sb);
				EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
				sb->s_dirt = 1;
				handle->h_sync = 1;
				err = ext4_journal_dirty_metadata(handle,
						EXT4_SB(sb)->s_sbh);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* device nodes store the dev_t in i_block[0] or i_block[1] */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	if (ei->i_extra_isize)
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);

	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
	rc = ext4_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT4_STATE_NEW;

out_brelse:
	brelse (bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}
2920
/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files: no transaction is
 *   running here, and we force a commit.
 *
 * - Within sys_sync(), kupdate and such: we wait on commit, if told to.
 *
 * - Within memory-reclaim (PF_MEMALLOC set): we simply return; we must
 *   not block reclaim on a journal commit.
 *
 * It is safe to return without writing anything, because the inode has
 * already been copied into the raw inode buffer by ext4_mark_inode_dirty().
 * That is what makes this correct for O_SYNC and for knfsd; it also means
 * every inode dirtier *must* call mark_inode_dirty() after changing the
 * in-core inode.
 *
 * Being called with a transaction already open is a bug (recursion into
 * the journal); we log it and refuse.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (ext4_journal_current_handle()) {
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
		dump_stack();
		return -EIO;
	}

	if (!wait)
		return 0;

	return ext4_force_commit(inode->i_sb);
}
2972
/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as early as possible.
 * In particular, when the VFS shrinks i_size we put the inode on the orphan
 * list and update i_disksize immediately, so that during the subsequent
 * flushing of dirty pages and freeing of disk blocks, any commit leaves the
 * freed blocks in an unused state on disk.  (On recovery the inode will be
 * truncated and the blocks freed, so no future commit can leave those
 * blocks visible to the user.)
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* Credits: quota structures for (user+group)*(old+new),
		 * plus the inode update itself. */
		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update the corresponding info in the inode so that
		 * everything lands in one transaction. */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* Orphan-protect the shrink before any blocks are freed */
		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);
	}

	rc = inode_setattr(inode, attr);

	/* If the truncate triggered by inode_setattr() could not get a
	 * transaction handle at all, we still need to remove the in-core
	 * orphan record added above. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
3062
/*
 * How many journal credits does a writepage() need?
 *
 * With N blocks per page we may touch: N data blocks, up to 2 indirect,
 * 2 double-indirect and 1 triple-indirect blocks, the corresponding bitmap
 * and group descriptor blocks, the inode block and the superblock - each
 * potentially journalled, hence the 3 * (N + indirects) term in data=journal
 * mode (2 * ... otherwise, since the data blocks are not journalled).
 *
 * If the inode's direct blocks hold an integral number of pages, a page
 * cannot straddle two indirect blocks and "5" indirects shrink to "3".
 *
 * This is an overestimate in most circumstances, which is fine - credits
 * must only never be too few.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
	int ret;

	/* Extent-mapped inodes have their own credit calculation */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_writepage_trans_blocks(inode, bpp);

	if (ext4_should_journal_data(inode))
		ret = 3 * (bpp + indirects) + 2;
	else
		ret = 2 * (bpp + indirects) + 2;

#ifdef CONFIG_QUOTA
	/* The quota structures were already allocated at DQUOT_INIT time,
	 * so we only account for updating their data blocks + inodes. */
	ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return ret;
}
3113
/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
		struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	/* ext4_do_update_inode() consumes one bh reference */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}
3131
3132
3133
3134
3135
3136
3137int
3138ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3139 struct ext4_iloc *iloc)
3140{
3141 int err = 0;
3142 if (handle) {
3143 err = ext4_get_inode_loc(inode, iloc);
3144 if (!err) {
3145 BUFFER_TRACE(iloc->bh, "get_write_access");
3146 err = ext4_journal_get_write_access(handle, iloc->bh);
3147 if (err) {
3148 brelse(iloc->bh);
3149 iloc->bh = NULL;
3150 }
3151 }
3152 }
3153 ext4_std_error(inode->i_sb, err);
3154 return err;
3155}
3156
/*
 * Expand an inode's i_extra_isize to @new_extra_isize bytes.
 * Returns 0 on success or a negative error number on failure.
 */
int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize,
			    struct ext4_iloc iloc, handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	/* Already large enough - nothing to do */
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present: just zero and claim the space */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present (may shift them out of the inode) */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3212{
3213 struct ext4_iloc iloc;
3214 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3215 static unsigned int mnt_count;
3216 int err, ret;
3217
3218 might_sleep();
3219 err = ext4_reserve_inode_write(handle, inode, &iloc);
3220 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3221 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3222
3223
3224
3225
3226
3227
3228
3229 if ((jbd2_journal_extend(handle,
3230 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3231 ret = ext4_expand_extra_isize(inode,
3232 sbi->s_want_extra_isize,
3233 iloc, handle);
3234 if (ret) {
3235 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3236 if (mnt_count !=
3237 le16_to_cpu(sbi->s_es->s_mnt_count)) {
3238 ext4_warning(inode->i_sb, __FUNCTION__,
3239 "Unable to expand inode %lu. Delete"
3240 " some EAs or run e2fsck.",
3241 inode->i_ino);
3242 mnt_count =
3243 le16_to_cpu(sbi->s_es->s_mnt_count);
3244 }
3245 }
3246 }
3247 }
3248 if (!err)
3249 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3250 return err;
3251}
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267void ext4_dirty_inode(struct inode *inode)
3268{
3269 handle_t *current_handle = ext4_journal_current_handle();
3270 handle_t *handle;
3271
3272 handle = ext4_journal_start(inode, 2);
3273 if (IS_ERR(handle))
3274 goto out;
3275 if (current_handle &&
3276 current_handle->h_transaction != handle->h_transaction) {
3277
3278 printk(KERN_EMERG "%s: transactions do not match!\n",
3279 __FUNCTION__);
3280 } else {
3281 jbd_debug(5, "marking dirty. outer handle=%p\n",
3282 current_handle);
3283 ext4_mark_inode_dirty(handle, inode);
3284 }
3285 ext4_journal_stop(handle);
3286out:
3287 return;
3288}
3289
#if 0
/*
 * NOTE(review): dead code -- this whole function is compiled out by the
 * surrounding "#if 0" and has no callers.  Kept for reference only;
 * consider deleting it outright.
 *
 * ext4_pin_inode: bind an inode's buffer into the current transaction
 * by taking write access and immediately dirtying it, so the inode
 * cannot be written back mid-transaction.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif
3318
3319int ext4_change_inode_journal_flag(struct inode *inode, int val)
3320{
3321 journal_t *journal;
3322 handle_t *handle;
3323 int err;
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335 journal = EXT4_JOURNAL(inode);
3336 if (is_journal_aborted(journal))
3337 return -EROFS;
3338
3339 jbd2_journal_lock_updates(journal);
3340 jbd2_journal_flush(journal);
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350 if (val)
3351 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3352 else
3353 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3354 ext4_set_aops(inode);
3355
3356 jbd2_journal_unlock_updates(journal);
3357
3358
3359
3360 handle = ext4_journal_start(inode, 1);
3361 if (IS_ERR(handle))
3362 return PTR_ERR(handle);
3363
3364 err = ext4_mark_inode_dirty(handle, inode);
3365 handle->h_sync = 1;
3366 ext4_journal_stop(handle);
3367 ext4_std_error(inode->i_sb, err);
3368
3369 return err;
3370}
3371