1
2
3
4
5#include <linux/time.h>
6#include <linux/fs.h>
7#include "reiserfs.h"
8#include "acl.h"
9#include "xattr.h"
10#include <linux/exportfs.h>
11#include <linux/pagemap.h>
12#include <linux/highmem.h>
13#include <linux/slab.h>
14#include <linux/uaccess.h>
15#include <asm/unaligned.h>
16#include <linux/buffer_head.h>
17#include <linux/mpage.h>
18#include <linux/writeback.h>
19#include <linux/quotaops.h>
20#include <linux/swap.h>
21#include <linux/uio.h>
22#include <linux/bio.h>
23
24int reiserfs_commit_write(struct file *f, struct page *page,
25 unsigned from, unsigned to);
26
void reiserfs_evict_inode(struct inode *inode)
{
	/*
	 * We need blocks for a transaction and (user+group) quota
	 * update (possibly delete).
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 2 +
	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
	struct reiserfs_transaction_handle th;
	int err;

	if (!inode->i_nlink && !is_bad_inode(inode))
		dquot_initialize(inode);

	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_nlink)
		goto no_delete;

	/*
	 * The objectid == 0 case happens when we abort creating a new
	 * inode for some reason like lack of space; it also covers the
	 * bad_inode case (the key was cleared by reiserfs_make_bad_inode).
	 */
	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {

		reiserfs_delete_xattrs(inode);

		reiserfs_write_lock(inode->i_sb);

		if (journal_begin(&th, inode->i_sb, jbegin_count))
			goto out;
		reiserfs_update_inode_transaction(inode);

		reiserfs_discard_prealloc(&th, inode);

		err = reiserfs_delete_object(&th, inode);

		/*
		 * Do the quota update inside the transaction for journaled
		 * quotas.  It must come after delete_object so the quota
		 * update lands in the same transaction as the stat data
		 * deletion.  The write lock is dropped around dquot_free_inode
		 * because quota code may sleep/take its own locks.
		 */
		if (!err) {
			int depth = reiserfs_write_unlock_nested(inode->i_sb);
			dquot_free_inode(inode);
			reiserfs_write_lock_nested(inode->i_sb, depth);
		}

		if (journal_end(&th))
			goto out;

		/*
		 * Check the reiserfs_delete_object result only after
		 * journal_end: the transaction must be closed either way.
		 */
		if (err)
			goto out;

		/*
		 * All items of the file are deleted, so we can remove
		 * the "save" link that protected it across a crash.
		 */
		remove_save_link(inode, 0 /* not truncate */);
out:
		reiserfs_write_unlock(inode->i_sb);
	} else {
		/* no object items are in the tree */
		;
	}

	/* note this must go after the journal_end to prevent deadlock */
	clear_inode(inode);

	dquot_drop(inode);
	inode->i_blocks = 0;
	return;

no_delete:
	clear_inode(inode);
	dquot_drop(inode);
}
110
111static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
112 __u32 objectid, loff_t offset, int type, int length)
113{
114 key->version = version;
115
116 key->on_disk_key.k_dir_id = dirid;
117 key->on_disk_key.k_objectid = objectid;
118 set_cpu_key_k_offset(key, offset);
119 set_cpu_key_k_type(key, type);
120 key->key_length = length;
121}
122
123
124
125
126
127void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
128 int type, int length)
129{
130 _make_cpu_key(key, get_inode_item_key_version(inode),
131 le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
132 le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
133 length);
134}
135
136
137inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
138 int version,
139 loff_t offset, int type, int length,
140 int entry_count )
141{
142 if (key) {
143 ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
144 ih->ih_key.k_objectid =
145 cpu_to_le32(key->on_disk_key.k_objectid);
146 }
147 put_ih_version(ih, version);
148 set_le_ih_k_offset(ih, offset);
149 set_le_ih_k_type(ih, type);
150 put_ih_item_len(ih, length);
151
152
153
154
155
156 put_ih_entry_count(ih, entry_count);
157}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193static inline void fix_tail_page_for_writing(struct page *page)
194{
195 struct buffer_head *head, *next, *bh;
196
197 if (page && page_has_buffers(page)) {
198 head = page_buffers(page);
199 bh = head;
200 do {
201 next = bh->b_this_page;
202 if (buffer_mapped(bh) && bh->b_blocknr == 0) {
203 reiserfs_unmap_buffer(bh);
204 }
205 bh = next;
206 } while (bh != head);
207 }
208}
209
210
211
212
213
214static inline int allocation_needed(int retval, b_blocknr_t allocated,
215 struct item_head *ih,
216 __le32 * item, int pos_in_item)
217{
218 if (allocated)
219 return 0;
220 if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
221 get_block_num(item, pos_in_item))
222 return 0;
223 return 1;
224}
225
226static inline int indirect_item_found(int retval, struct item_head *ih)
227{
228 return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
229}
230
/* Map @bh to disk @block on @inode's device (thin map_bh wrapper). */
static inline void set_block_dev_mapped(struct buffer_head *bh,
					b_blocknr_t block, struct inode *inode)
{
	map_bh(bh, inode->i_sb, block);
}
236
237
238
239
240
241static int file_capable(struct inode *inode, sector_t block)
242{
243
244 if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
245
246 block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
247 return 1;
248
249 return 0;
250}
251
/*
 * Release @path and restart the running transaction so the journal can
 * make progress.  Returns 0 on success or the journal error.  When the
 * handle is nested (t_refcount > 1) we cannot really end it, so only
 * the path is released and 0 is returned.
 */
static int restart_transaction(struct reiserfs_transaction_handle *th,
			       struct inode *inode, struct treepath *path)
{
	struct super_block *s = th->t_super;
	int err;

	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_refcount);

	pathrelse(path);

	/* we cannot restart while nested */
	if (th->t_refcount > 1) {
		return 0;
	}
	reiserfs_update_sd(th, inode);
	err = journal_end(th);
	if (!err) {
		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
		if (!err)
			reiserfs_update_inode_transaction(inode);
	}
	return err;
}
276
277
278
279
280
281
282
283
284
/*
 * Called for create == 0 lookups.  Maps the 'block'-th logical block of
 * the file into *bh_result.  If the data lives in direct item(s) (a file
 * tail), then with GET_BLOCK_READ_DIRECT the tail is copied into the
 * buffer's page and the buffer is mapped to block 0 (marker for "tail in
 * page"); without it, -ENOENT is returned (bmap cannot map tails).
 */
static int _get_block_create_0(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int args)
{
	INITIALIZE_PATH(path);
	struct cpu_key key;
	struct buffer_head *bh;
	struct item_head *ih, tmp_ih;
	b_blocknr_t blocknr;
	char *p = NULL;		/* kmap'ed page address, set lazily below */
	int chars;
	int ret;
	int result;
	int done = 0;
	unsigned long offset;

	/* prepare the key to look for the 'block'-th block of file */
	make_cpu_key(&key, inode,
		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
		     3);

	result = search_for_position_by_key(inode->i_sb, &key, &path);
	if (result != POSITION_FOUND) {
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		if (result == IO_ERROR)
			return -EIO;
		/*
		 * We do not return -ENOENT if there is a hole but the page
		 * is uptodate, because it means that there is some mmaped
		 * data associated with it that is yet to be written to disk.
		 */
		if ((args & GET_BLOCK_NO_HOLE)
		    && !PageUptodate(bh_result->b_page)) {
			return -ENOENT;
		}
		return 0;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	if (is_indirect_le_ih(ih)) {
		__le32 *ind_item = (__le32 *) ih_item_body(bh, ih);

		/*
		 * FIXME: here we could cache the indirect item (or part of
		 * it) in the inode to avoid search_by_key in case of
		 * subsequent access to the file.
		 */
		blocknr = get_block_num(ind_item, path.pos_in_item);
		ret = 0;
		if (blocknr) {
			map_bh(bh_result, inode->i_sb, blocknr);
			/* last pointer of the item: mark the boundary */
			if (path.pos_in_item ==
			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
				set_buffer_boundary(bh_result);
			}
		} else
			/*
			 * Hole: same uptodate-page exception as above —
			 * mmaped data may still be waiting to be written.
			 */
			if ((args & GET_BLOCK_NO_HOLE)
			    && !PageUptodate(bh_result->b_page)) {
				ret = -ENOENT;
			}

		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return ret;
	}
	/* requested data are in direct item(s) */
	if (!(args & GET_BLOCK_READ_DIRECT)) {
		/*
		 * we are called by bmap: we cannot map a block of a file
		 * when it is stored in direct item(s)
		 */
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return -ENOENT;
	}

	/*
	 * if we've got a direct item, and the buffer or page was uptodate,
	 * we don't want to pull data off disk again.  skip to the end,
	 * where we map the buffer and return
	 */
	if (buffer_uptodate(bh_result)) {
		goto finished;
	} else
		/*
		 * grab_tail_page can trigger calls to reiserfs_get_block on
		 * up-to-date pages without any buffers.  If the page is up
		 * to date, we don't want to read old data off disk.  Set the
		 * uptodate bit on the buffer instead and jump to the end.
		 */
	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
		set_buffer_uptodate(bh_result);
		goto finished;
	}
	/* read the file tail into part of the page */
	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
	copy_item_head(&tmp_ih, ih);

	/*
	 * we only want to kmap if we are reading the tail into the page.
	 * this is not the common case, so we don't kmap until we are sure
	 * we need to.  But this means the item might move if kmap schedules.
	 */
	if (!p)
		p = (char *)kmap(bh_result->b_page);

	p += offset;
	memset(p, 0, inode->i_sb->s_blocksize);
	do {
		if (!is_direct_le_ih(ih)) {
			BUG();
		}
		/*
		 * make sure we don't read more bytes than actually exist in
		 * the file.  This can happen in odd cases where i_size isn't
		 * correct, and when direct item padding results in a few
		 * extra bytes at the end of the direct item.
		 */
		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
			break;
		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
			chars =
			    inode->i_size - (le_ih_k_offset(ih) - 1) -
			    path.pos_in_item;
			done = 1;
		} else {
			chars = ih_item_len(ih) - path.pos_in_item;
		}
		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);

		if (done)
			break;

		p += chars;

		/*
		 * we are done if the direct item just read is not the last
		 * item of the node.  FIXME: we could check the right
		 * delimiting key to see whether the direct item continues in
		 * the right neighbor, or rely on i_size.
		 */
		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
			break;

		/* update key to look for the next piece */
		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
		result = search_for_position_by_key(inode->i_sb, &key, &path);
		if (result != POSITION_FOUND)
			/* i/o error most likely */
			break;
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
	} while (1);

	flush_dcache_page(bh_result->b_page);
	kunmap(bh_result->b_page);

finished:
	pathrelse(&path);

	if (result == IO_ERROR)
		return -EIO;

	/*
	 * this buffer has valid data but isn't valid for io.  mapping it to
	 * block #0 tells the rest of reiserfs it just has a tail in it.
	 */
	map_bh(bh_result, inode->i_sb, 0);
	set_buffer_uptodate(bh_result);
	return 0;
}
467
468
469
470
471
/*
 * Called to create a file map (bmap).  _get_block_create_0 is invoked
 * without GET_BLOCK_READ_DIRECT so it will not read direct items; its
 * return value is deliberately ignored — an unmapped bh_result simply
 * means "no block".
 */
static int reiserfs_bmap(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	if (!file_capable(inode, block))
		return -EFBIG;

	reiserfs_write_lock(inode->i_sb);
	/* do not read the direct item */
	_get_block_create_0(inode, block, bh_result, 0);
	reiserfs_write_unlock(inode->i_sb);
	return 0;
}
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
/*
 * get_block variant that refuses holes: with GET_BLOCK_NO_HOLE,
 * reiserfs_get_block returns -ENOENT for an unallocated block instead
 * of treating it as a hole.  The create argument is ignored.
 */
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
				       struct buffer_head *bh_result,
				       int create)
{
	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}
509
510
511
512
513
/*
 * get_block callback used for direct I/O: maps @iblock without leaving
 * a dangling transaction, rejects tail blocks, and flushes the inode if
 * a tail might need repacking before the pages go away.
 */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
					 sector_t iblock,
					 struct buffer_head *bh_result,
					 int create)
{
	int ret;

	bh_result->b_page = NULL;

	/*
	 * We set b_size before the reiserfs_get_block call since it is
	 * referenced in convert_tail_for_hole(), which may be called from
	 * reiserfs_get_block().
	 */
	bh_result->b_size = i_blocksize(inode);

	ret = reiserfs_get_block(inode, iblock, bh_result,
				 create | GET_BLOCK_NO_DANGLE);
	if (ret)
		goto out;

	/* don't allow direct io onto tail pages */
	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
		/*
		 * make sure future calls to the direct io funcs for this
		 * offset in the file fail by unmapping the buffer
		 */
		clear_buffer_mapped(bh_result);
		ret = -EINVAL;
	}

	/*
	 * Possible unpacked tail. Flush the data before pages have
	 * disappeared.
	 */
	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
		int err;

		reiserfs_write_lock(inode->i_sb);

		err = reiserfs_commit_for_inode(inode);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

		reiserfs_write_unlock(inode->i_sb);

		if (err < 0)
			ret = err;
	}
out:
	return ret;
}
565
566
567
568
569
570
571
572
573
574
575
/*
 * Helper for when reiserfs_get_block is called for a hole while the
 * file tail is still stored in a direct item.  @bh_result is the buffer
 * head for the hole; @tail_offset is the file offset of the start of
 * the tail.  Writing through write_begin/commit_write forces the tail
 * to be converted (padded up to block size) via get_block.
 *
 * This starts a new transaction internally — the caller must not hold a
 * transaction or any tree paths.
 */
static int convert_tail_for_hole(struct inode *inode,
				 struct buffer_head *bh_result,
				 loff_t tail_offset)
{
	unsigned long index;
	unsigned long tail_end;
	unsigned long tail_start;
	struct page *tail_page;
	struct page *hole_page = bh_result->b_page;
	int retval = 0;

	/* the tail must start on a block boundary (offsets are 1-based) */
	if ((tail_offset & (bh_result->b_size - 1)) != 1)
		return -EIO;

	/* always try to read until the end of the block */
	tail_start = tail_offset & (PAGE_SIZE - 1);
	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;

	index = tail_offset >> PAGE_SHIFT;
	/*
	 * hole_page can be NULL in case of direct_io; when the tail lives
	 * on a different page than the hole we must grab it ourselves.
	 */
	if (!hole_page || index != hole_page->index) {
		tail_page = grab_cache_page(inode->i_mapping, index);
		retval = -ENOMEM;
		if (!tail_page) {
			goto out;
		}
	} else {
		tail_page = hole_page;
	}

	/*
	 * We must fix the tail page for writing because it might have
	 * buffers that are mapped, but have a block number of 0.  This
	 * indicates tail data that has been read directly into the page,
	 * and __reiserfs_write_begin won't trigger a get_block in that
	 * case.
	 */
	fix_tail_page_for_writing(tail_page);
	retval = __reiserfs_write_begin(tail_page, tail_start,
					tail_end - tail_start);
	if (retval)
		goto unlock;

	/* tail conversion might change the data in the page */
	flush_dcache_page(tail_page);

	retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);

unlock:
	if (tail_page != hole_page) {
		unlock_page(tail_page);
		put_page(tail_page);
	}
out:
	return retval;
}
638
/*
 * Allocate one unformatted node block for @inode near @path, storing the
 * result in *allocated_block_nr.  Uses the preallocation-aware allocator
 * unless GET_BLOCK_NO_IMUX asks to avoid it (or preallocation is
 * compiled out).  Returns the block allocator's status code.
 */
static inline int _allocate_block(struct reiserfs_transaction_handle *th,
				  sector_t block,
				  struct inode *inode,
				  b_blocknr_t * allocated_block_nr,
				  struct treepath *path, int flags)
{
	BUG_ON(!th->t_trans_id);

#ifdef REISERFS_PREALLOCATE
	if (!(flags & GET_BLOCK_NO_IMUX)) {
		return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
						  path, block);
	}
#endif
	return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
					 block);
}
656
/*
 * Map the 'block'-th logical block of @inode to a disk block in
 * @bh_result.  @create carries GET_BLOCK_CREATE plus modifier flags
 * (GET_BLOCK_NO_HOLE, GET_BLOCK_READ_DIRECT, GET_BLOCK_NO_DANGLE, ...).
 * May allocate blocks, plug holes, convert a tail from direct to
 * indirect items, and append indirect items with holes as needed.
 * Returns 0 on success or a negative errno.
 */
int reiserfs_get_block(struct inode *inode, sector_t block,
		       struct buffer_head *bh_result, int create)
{
	int repeat, retval = 0;
	/* b_blocknr_t is (unsigned) 32 bit int */
	b_blocknr_t allocated_block_nr = 0;
	INITIALIZE_PATH(path);
	int pos_in_item;
	struct cpu_key key;
	struct buffer_head *bh, *unbh = NULL;
	struct item_head *ih, tmp_ih;
	__le32 *item;
	int done;
	int fs_gen;
	struct reiserfs_transaction_handle *th = NULL;
	/*
	 * space reserved in transaction batch:
	 * . 3 balancings in direct->indirect conversion
	 * . 1 block involved in reiserfs_update_sd()
	 * XXX in a practically impossible worst case direct2indirect()
	 * can incur (much) more than 3 balancings.
	 * quota update for user, group
	 */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
	int version;
	int dangle = 1;
	loff_t new_offset =
	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;

	reiserfs_write_lock(inode->i_sb);
	version = get_inode_item_key_version(inode);

	if (!file_capable(inode, block)) {
		reiserfs_write_unlock(inode->i_sb);
		return -EFBIG;
	}

	/*
	 * read-only lookup: no allocation — delegate to the create == 0
	 * helper (which can also read a tail stored in direct items into
	 * the page)
	 */
	if (!(create & GET_BLOCK_CREATE)) {
		int ret;
		/* find the number of the block-th logical block of the file */
		ret = _get_block_create_0(inode, block, bh_result,
					  create | GET_BLOCK_READ_DIRECT);
		reiserfs_write_unlock(inode->i_sb);
		return ret;
	}

	/*
	 * if we're already in a transaction, make sure to close
	 * any new transactions we start in this func
	 */
	if ((create & GET_BLOCK_NO_DANGLE) ||
	    reiserfs_transaction_running(inode->i_sb))
		dangle = 0;

	/*
	 * If the file is of such a size that it might have a tail and
	 * tails are enabled, we should mark it as possibly needing
	 * tail packing on close.
	 */
	if ((have_large_tails(inode->i_sb)
	     && inode->i_size < i_block_size(inode) * 4)
	    || (have_small_tails(inode->i_sb)
		&& inode->i_size < i_block_size(inode)))
		REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;

	/* set the key of the first byte in the 'block'-th block of file */
	make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /* key length */);
	if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
start_trans:
		th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
		if (!th) {
			retval = -ENOMEM;
			goto failure;
		}
		reiserfs_update_inode_transaction(inode);
	}
research:

	retval = search_for_position_by_key(inode->i_sb, &key, &path);
	if (retval == IO_ERROR) {
		retval = -EIO;
		goto failure;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	item = tp_item_body(&path);
	pos_in_item = path.pos_in_item;

	fs_gen = get_generation(inode->i_sb);
	copy_item_head(&tmp_ih, ih);

	if (allocation_needed
	    (retval, allocated_block_nr, ih, item, pos_in_item)) {
		/* we have to allocate a block for the unformatted node */
		if (!th) {
			pathrelse(&path);
			goto start_trans;
		}

		repeat =
		    _allocate_block(th, block, inode, &allocated_block_nr,
				    &path, create);

		/*
		 * On NO_DISK_SPACE/QUOTA_EXCEEDED, restart the transaction
		 * (flushing the journal may free blocks) and retry the
		 * allocation once without a path hint.
		 */
		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
			retval = restart_transaction(th, inode, &path);
			if (retval)
				goto failure;
			repeat =
			    _allocate_block(th, block, inode,
					    &allocated_block_nr, NULL, create);

			if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
				/* path was released — search again */
				goto research;
			}
			if (repeat == QUOTA_EXCEEDED)
				retval = -EDQUOT;
			else
				retval = -ENOSPC;
			goto failure;
		}

		/* allocator may have scheduled: re-search if the item moved */
		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			goto research;
		}
	}

	if (indirect_item_found(retval, ih)) {
		b_blocknr_t unfm_ptr;
		/*
		 * 'block'-th block is in the file already (there is a
		 * corresponding cell in some indirect item).  But it may be
		 * a zero unformatted node pointer (hole).
		 */
		unfm_ptr = get_block_num(item, pos_in_item);
		if (unfm_ptr == 0) {
			/* use the allocated block to plug the hole */
			reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
			if (fs_changed(fs_gen, inode->i_sb)
			    && item_moved(&tmp_ih, &path)) {
				reiserfs_restore_prepared_buffer(inode->i_sb,
								 bh);
				goto research;
			}
			set_buffer_new(bh_result);
			if (buffer_dirty(bh_result)
			    && reiserfs_data_ordered(inode->i_sb))
				reiserfs_add_ordered_list(inode, bh_result);
			put_block_num(item, pos_in_item, allocated_block_nr);
			unfm_ptr = allocated_block_nr;
			journal_mark_dirty(th, bh);
			reiserfs_update_sd(th, inode);
		}
		set_block_dev_mapped(bh_result, unfm_ptr, inode);
		pathrelse(&path);
		retval = 0;
		if (!dangle && th)
			retval = reiserfs_end_persistent_transaction(th);

		reiserfs_write_unlock(inode->i_sb);

		/*
		 * the item was found, so new blocks were not added to the
		 * file; there is no need to make sure the inode is updated
		 * with this transaction
		 */
		return retval;
	}

	if (!th) {
		pathrelse(&path);
		goto start_trans;
	}

	/*
	 * the desired position is not found or is in a direct item.  We
	 * have to append the file with holes up to the 'block'-th block,
	 * converting direct items to indirect ones if necessary.
	 */
	done = 0;
	do {
		if (is_statdata_le_ih(ih)) {
			__le32 unp = 0;
			struct cpu_key tmp_key;

			/* an indirect item has to be inserted */
			make_le_item_head(&tmp_ih, &key, version, 1,
					  TYPE_INDIRECT, UNFM_P_SIZE,
					  0 /* free_space */);

			/*
			 * we are going to add the 'block'-th block to the
			 * file; use the allocated block for that
			 */
			if (cpu_key_k_offset(&key) == 1) {
				unp = cpu_to_le32(allocated_block_nr);
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				set_buffer_new(bh_result);
				done = 1;
			}
			tmp_key = key;	/* ;) */
			set_cpu_key_k_offset(&tmp_key, 1);
			PATH_LAST_POSITION(&path)++;

			retval =
			    reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
						 inode, (char *)&unp);
			if (retval) {
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				/*
				 * retval == -ENOSPC, -EDQUOT or -EIO
				 * or -EEXIST
				 */
				goto failure;
			}
		} else if (is_direct_le_ih(ih)) {
			/* direct item has to be converted */
			loff_t tail_offset;

			tail_offset =
			    ((le_ih_k_offset(ih) -
			      1) & ~(inode->i_sb->s_blocksize - 1)) + 1;

			/*
			 * direct item we just found fits into the block we
			 * have to map.  Convert it into an unformatted node:
			 * use bh_result for the conversion.
			 */
			if (tail_offset == cpu_key_k_offset(&key)) {
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				unbh = bh_result;
				done = 1;
			} else {
				/*
				 * we have to pad the file tail stored in
				 * direct item(s) up to the block size and
				 * convert it to an unformatted node.  FIXME:
				 * this should also get into the page cache.
				 */
				pathrelse(&path);
				/*
				 * ugly, but we can only end the transaction
				 * if we aren't nested
				 */
				BUG_ON(!th->t_refcount);
				if (th->t_refcount == 1) {
					retval =
					    reiserfs_end_persistent_transaction
					    (th);
					th = NULL;
					if (retval)
						goto failure;
				}

				retval =
				    convert_tail_for_hole(inode, bh_result,
							  tail_offset);
				if (retval) {
					if (retval != -ENOSPC)
						reiserfs_error(inode->i_sb,
							       "clm-6004",
							       "convert tail failed "
							       "inode %lu, error %d",
							       inode->i_ino,
							       retval);
					if (allocated_block_nr) {
						/*
						 * the bitmap, the super,
						 * and the stat data == 3
						 */
						if (!th)
							th = reiserfs_persistent_transaction(inode->i_sb, 3);
						if (th)
							reiserfs_free_block(th,
									    inode,
									    allocated_block_nr,
									    1);
					}
					goto failure;
				}
				goto research;
			}
			retval =
			    direct2indirect(th, inode, &path, unbh,
					    tail_offset);
			if (retval) {
				reiserfs_unmap_buffer(unbh);
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				goto failure;
			}
			/*
			 * it is important that set_buffer_uptodate is done
			 * after direct2indirect.  The buffer might contain
			 * valid data newer than the data on disk (read by
			 * read_folio, changed, and then sent here by
			 * writepage).  direct2indirect needs to know whether
			 * unbh was already up to date, so it can decide if
			 * the data in unbh needs to be replaced with data
			 * from the disk.
			 */
			set_buffer_uptodate(unbh);

			/*
			 * unbh->b_page == NULL in case of a DIRECT_IO
			 * request; the buffer will disappear shortly, so it
			 * should not be added to any list
			 */
			if (unbh->b_page) {
				/*
				 * we've converted the tail, so we must flush
				 * unbh before the transaction commits
				 */
				reiserfs_add_tail_list(inode, unbh);

				/*
				 * mark it dirty now to prevent commit_write
				 * from adding this buffer to the inode's
				 * dirty buffer list
				 */
				mark_buffer_dirty(unbh);
			}
		} else {
			/*
			 * append the indirect item with holes if needed;
			 * when appending the pointer for the 'block'-th
			 * block, use the block that is already allocated
			 */
			struct cpu_key tmp_key;
			/*
			 * on-stack pointer cell for the common case of
			 * needing only one block (avoids kcalloc)
			 */
			unp_t unf_single = 0;
			unp_t *un;
			__u64 max_to_insert =
			    MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
			    UNFM_P_SIZE;
			__u64 blocks_needed;

			RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
			       "vs-804: invalid position for append");
			/*
			 * indirect item has to be appended; set up the key
			 * of that position (key type is unimportant)
			 */
			make_cpu_key(&tmp_key, inode,
				     le_key_k_offset(version,
						     &ih->ih_key) +
				     op_bytes_number(ih,
						     inode->i_sb->s_blocksize),
				     TYPE_INDIRECT, 3);

			RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
			       "green-805: invalid offset");
			blocks_needed =
			    1 +
			    ((cpu_key_k_offset(&key) -
			      cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
			     s_blocksize_bits);

			if (blocks_needed == 1) {
				un = &unf_single;
			} else {
				un = kcalloc(min(blocks_needed, max_to_insert),
					     UNFM_P_SIZE, GFP_NOFS);
				if (!un) {
					un = &unf_single;
					blocks_needed = 1;
					max_to_insert = 0;
				}
			}
			if (blocks_needed <= max_to_insert) {
				/*
				 * we are going to add the target block to
				 * the file; use the allocated block for that
				 */
				un[blocks_needed - 1] =
				    cpu_to_le32(allocated_block_nr);
				set_block_dev_mapped(bh_result,
						     allocated_block_nr, inode);
				set_buffer_new(bh_result);
				done = 1;
			} else {
				/* paste only a hole into the indirect item */
				/*
				 * If kcalloc failed, max_to_insert became
				 * zero; it means we only have space for one
				 * block
				 */
				blocks_needed =
				    max_to_insert ? max_to_insert : 1;
			}
			retval =
			    reiserfs_paste_into_item(th, &path, &tmp_key, inode,
						     (char *)un,
						     UNFM_P_SIZE *
						     blocks_needed);

			if (blocks_needed != 1)
				kfree(un);

			if (retval) {
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
				goto failure;
			}
			if (!done) {
				/*
				 * We need to record the new file size in
				 * case this function is interrupted/aborted
				 * later on.  And we may do this only for
				 * holes.
				 */
				inode->i_size +=
				    inode->i_sb->s_blocksize * blocks_needed;
			}
		}

		if (done == 1)
			break;

		/*
		 * this loop could log more blocks than we had originally
		 * asked for.  So, we have to allow the transaction to end
		 * if it is too big or too full.  Update the inode so things
		 * are consistent if we crash before the function returns.
		 */
		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
			retval = restart_transaction(th, inode, &path);
			if (retval)
				goto failure;
		}
		/*
		 * inserting indirect pointers for a hole can take a long
		 * time.  reschedule if needed and also release the write
		 * lock for others to get a chance.
		 */
		reiserfs_cond_resched(inode->i_sb);

		retval = search_for_position_by_key(inode->i_sb, &key, &path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto failure;
		}
		if (retval == POSITION_FOUND) {
			reiserfs_warning(inode->i_sb, "vs-825",
					 "%K should not be found", &key);
			retval = -EEXIST;
			if (allocated_block_nr)
				reiserfs_free_block(th, inode,
						    allocated_block_nr, 1);
			pathrelse(&path);
			goto failure;
		}
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
		item = tp_item_body(&path);
		pos_in_item = path.pos_in_item;
	} while (1);

	retval = 0;

failure:
	/* close our transaction unless a caller's handle must stay open */
	if (th && (!dangle || (retval && !th->t_trans_id))) {
		int err;
		if (th->t_trans_id)
			reiserfs_update_sd(th, inode);
		err = reiserfs_end_persistent_transaction(th);
		if (err)
			retval = err;
	}

	reiserfs_write_unlock(inode->i_sb);
	reiserfs_check_path(&path);
	return retval;
}
1162
/* VFS readahead hook: batch block mapping through mpage. */
static void reiserfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, reiserfs_get_block);
}
1167
1168
1169
1170
1171
1172
1173static int real_space_diff(struct inode *inode, int sd_size)
1174{
1175 int bytes;
1176 loff_t blocksize = inode->i_sb->s_blocksize;
1177
1178 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
1179 return sd_size;
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190 bytes =
1191 ((inode->i_size +
1192 (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
1193 sd_size;
1194 return bytes;
1195}
1196
1197static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
1198 int sd_size)
1199{
1200 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1201 return inode->i_size +
1202 (loff_t) (real_space_diff(inode, sd_size));
1203 }
1204 return ((loff_t) real_space_diff(inode, sd_size)) +
1205 (((loff_t) blocks) << 9);
1206}
1207
1208
1209static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
1210{
1211 loff_t bytes = inode_get_bytes(inode);
1212 loff_t real_space = real_space_diff(inode, sd_size);
1213
1214
1215 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1216 bytes += (loff_t) 511;
1217 }
1218
1219
1220
1221
1222
1223
1224 if (bytes < real_space)
1225 return 0;
1226 return (bytes - real_space) >> 9;
1227}
1228
1229
1230
1231
1232
1233
1234
1235
1236
/*
 * Fill the in-core @inode from the stat data item that @path points at
 * (v1 or v2 format), release the path, and wire up the inode/file/
 * address-space operations according to the file type.
 */
static void init_inode(struct inode *inode, struct treepath *path)
{
	struct buffer_head *bh;
	struct item_head *ih;
	__u32 rdev;

	bh = PATH_PLAST_BUFFER(path);
	ih = tp_item_head(path);

	copy_key(INODE_PKEY(inode), &ih->ih_key);

	INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
	REISERFS_I(inode)->i_flags = 0;
	REISERFS_I(inode)->i_prealloc_block = 0;
	REISERFS_I(inode)->i_prealloc_count = 0;
	REISERFS_I(inode)->i_trans_id = 0;
	REISERFS_I(inode)->i_jl = NULL;
	reiserfs_init_xattr_rwsem(inode);

	if (stat_data_v1(ih)) {
		/* old (3.5) stat data */
		struct stat_data_v1 *sd =
		    (struct stat_data_v1 *)ih_item_body(bh, ih);
		unsigned long blocks;

		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
		set_inode_sd_version(inode, STAT_DATA_V1);
		inode->i_mode = sd_v1_mode(sd);
		set_nlink(inode, sd_v1_nlink(sd));
		i_uid_write(inode, sd_v1_uid(sd));
		i_gid_write(inode, sd_v1_gid(sd));
		inode->i_size = sd_v1_size(sd);
		inode->i_atime.tv_sec = sd_v1_atime(sd);
		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
		inode->i_ctime.tv_sec = sd_v1_ctime(sd);
		/* v1 stat data has second-granularity timestamps only */
		inode->i_atime.tv_nsec = 0;
		inode->i_ctime.tv_nsec = 0;
		inode->i_mtime.tv_nsec = 0;

		inode->i_blocks = sd_v1_blocks(sd);
		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
		blocks = (inode->i_size + 511) >> 9;
		blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);

		/*
		 * Clamp a bogus on-disk block count to what i_size implies.
		 * NOTE(review): presumably works around old kernels that
		 * stored bad i_blocks values; can be wrong for sparse
		 * files.  The on-disk value is only rewritten if the inode
		 * ever changes.
		 */
		if (inode->i_blocks > blocks) {
			inode->i_blocks = blocks;
		}

		rdev = sd_v1_rdev(sd);
		REISERFS_I(inode)->i_first_direct_byte =
		    sd_v1_first_direct_byte(sd);

		/*
		 * An odd sector count cannot be right (blocks are whole
		 * sectors); round up to even here.
		 */
		if (inode->i_blocks & 1) {
			inode->i_blocks++;
		}
		inode_set_bytes(inode,
				to_real_used_space(inode, inode->i_blocks,
						   SD_V1_SIZE));
		/*
		 * nopack is initially zero for v1 objects; for v2 objects
		 * it is initialised from sd_attrs below
		 */
		REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
	} else {
		/*
		 * new stat data found, but the object may still have old
		 * items (directories and symlinks)
		 */
		struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);

		inode->i_mode = sd_v2_mode(sd);
		set_nlink(inode, sd_v2_nlink(sd));
		i_uid_write(inode, sd_v2_uid(sd));
		inode->i_size = sd_v2_size(sd);
		i_gid_write(inode, sd_v2_gid(sd));
		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
		inode->i_atime.tv_sec = sd_v2_atime(sd);
		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
		inode->i_ctime.tv_nsec = 0;
		inode->i_mtime.tv_nsec = 0;
		inode->i_atime.tv_nsec = 0;
		inode->i_blocks = sd_v2_blocks(sd);
		rdev = sd_v2_rdev(sd);
		/* device nodes reuse the generation slot for rdev */
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			inode->i_generation =
			    le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
		else
			inode->i_generation = sd_v2_generation(sd);

		/* directories and symlinks keep 3.5-format item keys */
		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
			set_inode_item_key_version(inode, KEY_FORMAT_3_5);
		else
			set_inode_item_key_version(inode, KEY_FORMAT_3_6);
		REISERFS_I(inode)->i_first_direct_byte = 0;
		set_inode_sd_version(inode, STAT_DATA_V2);
		inode_set_bytes(inode,
				to_real_used_space(inode, inode->i_blocks,
						   SD_V2_SIZE));
		/*
		 * read persistent inode attributes from sd and initialise
		 * generic inode flags from them
		 */
		REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
	}

	pathrelse(path);
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &reiserfs_file_inode_operations;
		inode->i_fop = &reiserfs_file_operations;
		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &reiserfs_dir_inode_operations;
		inode->i_fop = &reiserfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &reiserfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
	} else {
		inode->i_blocks = 0;
		inode->i_op = &reiserfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
	}
}
1372
1373
1374static void inode2sd(void *sd, struct inode *inode, loff_t size)
1375{
1376 struct stat_data *sd_v2 = (struct stat_data *)sd;
1377
1378 set_sd_v2_mode(sd_v2, inode->i_mode);
1379 set_sd_v2_nlink(sd_v2, inode->i_nlink);
1380 set_sd_v2_uid(sd_v2, i_uid_read(inode));
1381 set_sd_v2_size(sd_v2, size);
1382 set_sd_v2_gid(sd_v2, i_gid_read(inode));
1383 set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
1384 set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
1385 set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
1386 set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
1387 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1388 set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
1389 else
1390 set_sd_v2_generation(sd_v2, inode->i_generation);
1391 set_sd_v2_attrs(sd_v2, REISERFS_I(inode)->i_attrs);
1392}
1393
1394
1395static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
1396{
1397 struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
1398
1399 set_sd_v1_mode(sd_v1, inode->i_mode);
1400 set_sd_v1_uid(sd_v1, i_uid_read(inode));
1401 set_sd_v1_gid(sd_v1, i_gid_read(inode));
1402 set_sd_v1_nlink(sd_v1, inode->i_nlink);
1403 set_sd_v1_size(sd_v1, size);
1404 set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
1405 set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
1406 set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
1407
1408 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1409 set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
1410 else
1411 set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
1412
1413
1414 set_sd_v1_first_direct_byte(sd_v1,
1415 REISERFS_I(inode)->i_first_direct_byte);
1416}
1417
1418
1419
1420
1421
1422static void update_stat_data(struct treepath *path, struct inode *inode,
1423 loff_t size)
1424{
1425 struct buffer_head *bh;
1426 struct item_head *ih;
1427
1428 bh = PATH_PLAST_BUFFER(path);
1429 ih = tp_item_head(path);
1430
1431 if (!is_statdata_le_ih(ih))
1432 reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
1433 INODE_PKEY(inode), ih);
1434
1435
1436 if (stat_data_v1(ih)) {
1437 inode2sd_v1(ih_item_body(bh, ih), inode, size);
1438 } else {
1439 inode2sd(ih_item_body(bh, ih), inode, size);
1440 }
1441
1442 return;
1443}
1444
/*
 * Find @inode's stat data item in the tree and rewrite it with the
 * current in-core state (using @size as the recorded size), inside the
 * caller's transaction @th.  Loops if preparing the buffer for the
 * journal raced with a tree rebalance that moved the item.
 */
void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
			     struct inode *inode, loff_t size)
{
	struct cpu_key key;
	INITIALIZE_PATH(path);
	struct buffer_head *bh;
	int fs_gen;
	struct item_head *ih, tmp_ih;
	int retval;

	BUG_ON(!th->t_trans_id);

	/* key type is unimportant for stat data */
	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);

	for (;;) {
		int pos;
		/* look for the object's stat data */
		retval = search_item(inode->i_sb, &key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(inode->i_sb, "vs-13050",
				       "i/o failure occurred trying to "
				       "update %K stat data", &key);
			return;
		}
		if (retval == ITEM_NOT_FOUND) {
			pos = PATH_LAST_POSITION(&path);
			pathrelse(&path);
			if (inode->i_nlink == 0) {
				/* deleted object: nothing to update */
				return;
			}
			reiserfs_warning(inode->i_sb, "vs-13060",
					 "stat data of object %k (nlink == %d) "
					 "not found (pos %d)",
					 INODE_PKEY(inode), inode->i_nlink,
					 pos);
			reiserfs_check_path(&path);
			return;
		}

		/*
		 * sigh, prepare_for_journal might schedule.  When it
		 * schedules the FS might change.  We have to detect that,
		 * and loop back to the search if the stat data item moved.
		 */
		bh = get_last_bh(&path);
		ih = tp_item_head(&path);
		copy_item_head(&tmp_ih, ih);
		fs_gen = get_generation(inode->i_sb);
		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);

		/* stat data item has been moved after scheduling */
		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
			continue;
		}
		break;
	}
	update_stat_data(&path, inode, size);
	journal_mark_dirty(th, bh);
	pathrelse(&path);
	return;
}
1510
1511
1512
1513
1514
1515
1516
1517
/*
 * Mark an inode bad AND clear the key in its private portion: otherwise
 * a later iput could try to delete whatever object the inode's stale
 * key last pointed at.
 */
static void reiserfs_make_bad_inode(struct inode *inode)
{
	memset(INODE_PKEY(inode), 0, KEY_SIZE);
	make_bad_inode(inode);
}
1523
1524
1525
1526
1527
/*
 * iget5_locked() init callback: record the objectid as i_ino and stash
 * the directory id into the inode's key.  Always succeeds.
 */
int reiserfs_init_locked_inode(struct inode *inode, void *p)
{
	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
	inode->i_ino = args->objectid;
	INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
	return 0;
}
1535
1536
1537
1538
1539
/*
 * Look up the stat data of (args->dirid, inode->i_ino) in the tree and
 * fill the in-core inode from it.  On any failure the inode is marked
 * bad (with its key cleared) instead of returning an error.
 */
void reiserfs_read_locked_inode(struct inode *inode,
				struct reiserfs_iget_args *args)
{
	INITIALIZE_PATH(path_to_sd);
	struct cpu_key key;
	unsigned long dirino;
	int retval;

	dirino = args->dirid;

	/*
	 * set version 1 — version 2 could be used too, because the stat
	 * data key is the same in both versions
	 */
	_make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3);

	/* look for the object's stat data */
	retval = search_item(inode->i_sb, &key, &path_to_sd);
	if (retval == IO_ERROR) {
		reiserfs_error(inode->i_sb, "vs-13070",
			       "i/o failure occurred trying to find "
			       "stat data of %K", &key);
		reiserfs_make_bad_inode(inode);
		return;
	}

	/* a stale NFS handle can trigger this without it being an error */
	if (retval != ITEM_FOUND) {
		pathrelse(&path_to_sd);
		reiserfs_make_bad_inode(inode);
		clear_nlink(inode);
		return;
	}

	init_inode(inode, &path_to_sd);

	/*
	 * It is possible that knfsd is trying to access an inode of a file
	 * that is being removed from disk by another thread.  Since the
	 * stat data is updated on unlink, checking nlink here is enough to
	 * catch that race.
	 *
	 * The one place where meeting an nlink == 0 inode is legitimate is
	 * the processing of open-unlinked and half-truncated files during
	 * mount, signalled by s_is_unlinked_ok.
	 */
	if ((inode->i_nlink == 0) &&
	    !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
		reiserfs_warning(inode->i_sb, "vs-13075",
				 "dead inode read from disk %K. "
				 "This is likely to be race with knfsd. Ignore",
				 &key);
		reiserfs_make_bad_inode(inode);
	}

	/* init_inode() should have released the path */
	reiserfs_check_path(&path_to_sd);

	/*
	 * Stat data v1 doesn't support ACLs.
	 */
	if (get_inode_sd_version(inode) == STAT_DATA_V1)
		cache_no_acl(inode);
}
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626int reiserfs_find_actor(struct inode *inode, void *opaque)
1627{
1628 struct reiserfs_iget_args *args;
1629
1630 args = opaque;
1631
1632 return (inode->i_ino == args->objectid) &&
1633 (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
1634}
1635
/*
 * Find the in-core inode for @key, reading it from disk if not cached.
 * Returns ERR_PTR(-ENOMEM) if iget5_locked() fails, NULL if the inode
 * turned out bad or its key no longer matches (object deleted, id
 * reused), otherwise the referenced inode.
 *
 * Called with the reiserfs write lock held; it is dropped around
 * iget5_locked(), which may sleep.
 */
struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
{
	struct inode *inode;
	struct reiserfs_iget_args args;
	int depth;

	args.objectid = key->on_disk_key.k_objectid;
	args.dirid = key->on_disk_key.k_dir_id;
	depth = reiserfs_write_unlock_nested(s);
	inode = iget5_locked(s, key->on_disk_key.k_objectid,
			     reiserfs_find_actor, reiserfs_init_locked_inode,
			     (void *)(&args));
	reiserfs_write_lock_nested(s, depth);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		/* freshly allocated: read the stat data from disk */
		reiserfs_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}

	/* drop the inode if it is bad or no longer the object we wanted */
	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
		/* either due to i/o error or a stale NFS handle */
		iput(inode);
		inode = NULL;
	}
	return inode;
}
1664
/*
 * Turn an (objectid, dir_id, generation) triple decoded from an NFS
 * file handle into a dentry.  generation == 0 means "don't verify the
 * generation".
 */
static struct dentry *reiserfs_get_dentry(struct super_block *sb,
	u32 objectid, u32 dir_id, u32 generation)

{
	struct cpu_key key;
	struct inode *inode;

	/*
	 * Only the two short-key fields are filled in; presumably
	 * nothing downstream reads the rest of @key — TODO confirm
	 * (reiserfs_iget/comp_short_keys only compare the short key).
	 */
	key.on_disk_key.k_objectid = objectid;
	key.on_disk_key.k_dir_id = dir_id;
	reiserfs_write_lock(sb);
	inode = reiserfs_iget(sb, &key);
	if (inode && !IS_ERR(inode) && generation != 0 &&
	    generation != inode->i_generation) {
		/* stale handle: the object was deleted and the id reused */
		iput(inode);
		inode = NULL;
	}
	reiserfs_write_unlock(sb);

	return d_obtain_alias(inode);
}
1685
/*
 * exportfs: decode an NFS file handle to a dentry.
 *
 * The handle is an array of 32-bit words whose meaning is encoded in
 * fh_type (as this function and reiserfs_fh_to_parent read them):
 *   raw[0] = objectid, raw[1] = dir_id        (always, fh_len >= 2)
 *   raw[2] = generation                       (fh_type 3, 5 or 6)
 *   parent objectid/dir_id at raw[2]/raw[3]   (fh_type 4)
 *                        or raw[3]/raw[4]     (fh_type >= 5)
 *   raw[5] = parent generation                (fh_type 6)
 */
struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
				     int fh_len, int fh_type)
{
	/*
	 * fh_type should never claim more words than fh_len provides;
	 * clamp it if it does.  The (type 6, len 5) combination is
	 * apparently common enough not to warn about — TODO confirm
	 * which clients generate it.
	 */
	if (fh_type > fh_len) {
		if (fh_type != 6 || fh_len != 5)
			reiserfs_warning(sb, "reiserfs-13077",
				"nfsd/reiserfs, fhtype=%d, len=%d - odd",
				fh_type, fh_len);
		fh_type = fh_len;
	}
	if (fh_len < 2)
		return NULL;

	/* generation (raw[2]) is only present for types 3, 5 and 6 */
	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
		(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
}
1715
1716struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1717 int fh_len, int fh_type)
1718{
1719 if (fh_type > fh_len)
1720 fh_type = fh_len;
1721 if (fh_type < 4)
1722 return NULL;
1723
1724 return reiserfs_get_dentry(sb,
1725 (fh_type >= 5) ? fid->raw[3] : fid->raw[2],
1726 (fh_type >= 5) ? fid->raw[4] : fid->raw[3],
1727 (fh_type == 6) ? fid->raw[5] : 0);
1728}
1729
1730int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1731 struct inode *parent)
1732{
1733 int maxlen = *lenp;
1734
1735 if (parent && (maxlen < 5)) {
1736 *lenp = 5;
1737 return FILEID_INVALID;
1738 } else if (maxlen < 3) {
1739 *lenp = 3;
1740 return FILEID_INVALID;
1741 }
1742
1743 data[0] = inode->i_ino;
1744 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1745 data[2] = inode->i_generation;
1746 *lenp = 3;
1747 if (parent) {
1748 data[3] = parent->i_ino;
1749 data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
1750 *lenp = 5;
1751 if (maxlen >= 6) {
1752 data[5] = parent->i_generation;
1753 *lenp = 6;
1754 }
1755 }
1756 return *lenp;
1757}
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
/*
 * ->write_inode(): push the inode's stat data into the journal.  Only
 * data-integrity writeback (WB_SYNC_ALL) outside memory reclaim does
 * real work; other writeback returns 0 without touching the journal —
 * presumably the stat data is kept current by ordinary transactions.
 */
int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct reiserfs_transaction_handle th;
	int jbegin_count = 1;

	if (sb_rdonly(inode->i_sb))
		return -EROFS;

	/*
	 * NOTE(review): the PF_MEMALLOC exclusion looks like deadlock
	 * avoidance — starting a transaction from reclaim could block
	 * on the journal — confirm against journal_begin() semantics.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
		reiserfs_write_lock(inode->i_sb);
		if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
			reiserfs_update_sd(&th, inode);
			journal_end_sync(&th);
		}
		reiserfs_write_unlock(inode->i_sb);
	}
	return 0;
}
1793
1794
1795
1796
1797
/*
 * Insert the initial "."/".." item of a newly created directory.
 * @ih carries the key of the stat data the caller just inserted and is
 * reused to build the directory item's header; @path is scratch.
 * Returns 0 on success, -EIO on search failure, -EEXIST if an item
 * with this key already exists.
 */
static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
				  struct inode *inode,
				  struct item_head *ih, struct treepath *path,
				  struct inode *dir)
{
	struct super_block *sb = th->t_super;
	char empty_dir[EMPTY_DIR_SIZE];
	char *body = empty_dir;
	struct cpu_key key;
	int retval;

	BUG_ON(!th->t_trans_id);

	/* key of the directory's first entry ("."), at DOT_OFFSET */
	_make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
		      le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
		      TYPE_DIRENTRY, 3 );

	/*
	 * Build the empty-dir item body ("." and "..") in the layout
	 * matching the fs format; the parent's ids go into "..".
	 */
	if (old_format_only(sb)) {
		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
				  TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);

		make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
				       ih->ih_key.k_objectid,
				       INODE_PKEY(dir)->k_dir_id,
				       INODE_PKEY(dir)->k_objectid);
	} else {
		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
				  TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);

		make_empty_dir_item(body, ih->ih_key.k_dir_id,
				    ih->ih_key.k_objectid,
				    INODE_PKEY(dir)->k_dir_id,
				    INODE_PKEY(dir)->k_objectid);
	}

	/* look for place in the tree for the new item */
	retval = search_item(sb, &key, path);
	if (retval == IO_ERROR) {
		reiserfs_error(sb, "vs-13080",
			       "i/o failure occurred creating new directory");
		return -EIO;
	}
	if (retval == ITEM_FOUND) {
		pathrelse(path);
		reiserfs_warning(sb, "vs-13070",
				 "object with this key exists (%k)",
				 &(ih->ih_key));
		return -EEXIST;
	}

	/* insert item, that is empty directory item */
	return reiserfs_insert_item(th, path, &key, ih, inode, body);
}
1856
1857
1858
1859
1860
/*
 * Insert the body of a new symlink as a single direct item at offset 1.
 * @symname/@item_len give the target string; @ih carries the key of the
 * stat data inserted by the caller.  Returns 0, -EIO on search failure
 * or -EEXIST if an item with this key already exists.
 */
static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
				struct inode *inode,
				struct item_head *ih,
				struct treepath *path, const char *symname,
				int item_len)
{
	struct super_block *sb = th->t_super;
	struct cpu_key key;
	int retval;

	BUG_ON(!th->t_trans_id);

	/* key of the symlink body: direct item at offset 1 */
	_make_cpu_key(&key, KEY_FORMAT_3_5,
		      le32_to_cpu(ih->ih_key.k_dir_id),
		      le32_to_cpu(ih->ih_key.k_objectid),
		      1, TYPE_DIRECT, 3 );

	make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
			  0 );

	/* look for place in the tree for the new item */
	retval = search_item(sb, &key, path);
	if (retval == IO_ERROR) {
		reiserfs_error(sb, "vs-13080",
			       "i/o failure occurred creating new symlink");
		return -EIO;
	}
	if (retval == ITEM_FOUND) {
		pathrelse(path);
		reiserfs_warning(sb, "vs-13080",
				 "object with this key exists (%k)",
				 &(ih->ih_key));
		return -EEXIST;
	}

	/* insert item, that is the body of the symlink */
	return reiserfs_insert_item(th, path, &key, ih, inode, symname);
}
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
/*
 * Create a new object on disk for @inode: allocate an objectid,
 * initialize the in-core inode, insert its stat data item and — for
 * directories and symlinks — the initial body item, then attach
 * inherited ACLs and the security xattr.  Runs inside transaction @th
 * with the reiserfs write lock held; the lock is dropped around
 * operations that may sleep (quota, inode hashing, xattr writes).
 *
 * @i_size is the initial object size chosen by the caller —
 * presumably EMPTY_DIR_SIZE for directories and strlen(symname) for
 * symlinks (TODO confirm callers).
 *
 * Returns 0 on success.  On failure the error labels fall through in
 * order: out_bad_inode (wipe key, uncharge quota) -> out_end_trans
 * (close the transaction, drop quota) -> out_inserted_sd (unhash and
 * release the inode); a negative errno is returned.
 */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
		       struct inode *dir, umode_t mode, const char *symname,
		       loff_t i_size, struct dentry *dentry,
		       struct inode *inode,
		       struct reiserfs_security_handle *security)
{
	struct super_block *sb = dir->i_sb;
	struct reiserfs_iget_args args;
	INITIALIZE_PATH(path_to_key);
	struct cpu_key key;
	struct item_head ih;
	struct stat_data sd;
	int retval;
	int err;
	int depth;

	BUG_ON(!th->t_trans_id);

	/* charge quota for the new inode; dquot_* may sleep */
	depth = reiserfs_write_unlock_nested(sb);
	err = dquot_alloc_inode(inode);
	reiserfs_write_lock_nested(sb, depth);
	if (err)
		goto out_end_trans;
	if (!dir->i_nlink) {
		/* the parent directory has been removed under us */
		err = -EPERM;
		goto out_bad_inode;
	}

	/* item head of new item: packing locality + fresh objectid */
	ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
	ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
	if (!ih.ih_key.k_objectid) {
		err = -ENOMEM;
		goto out_bad_inode;
	}
	args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
	if (old_format_only(sb))
		make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
				  TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
	else
		make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
				  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
	memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
	args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);

	/* hash the inode early so concurrent lookups see it; may sleep */
	depth = reiserfs_write_unlock_nested(inode->i_sb);
	err = insert_inode_locked4(inode, args.objectid,
				   reiserfs_find_actor, &args);
	reiserfs_write_lock_nested(inode->i_sb, depth);
	if (err) {
		err = -EINVAL;
		goto out_bad_inode;
	}

	if (old_format_only(sb))
		/*
		 * not a perfect generation count, as object ids can be
		 * reused, but this is as good as reiserfs can do right
		 * now.
		 */
		inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
	else
#if defined( USE_INODE_GENERATION_COUNTER )
		inode->i_generation =
		    le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
#else
		inode->i_generation = ++event;
#endif

	/* fill the in-core stat data fields */
	set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));

	/* uid and gid must already be set by the caller for quota init */

	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	inode->i_size = i_size;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	/* U32_MAX presumably means "no direct (tail) bytes" — confirm */
	REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
	    U32_MAX ;

	INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
	REISERFS_I(inode)->i_flags = 0;
	REISERFS_I(inode)->i_prealloc_block = 0;
	REISERFS_I(inode)->i_prealloc_count = 0;
	REISERFS_I(inode)->i_trans_id = 0;
	REISERFS_I(inode)->i_jl = NULL;
	REISERFS_I(inode)->i_attrs =
	    REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
	sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
	reiserfs_init_xattr_rwsem(inode);

	/* key to search for correct place for new stat data */
	_make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
		      le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
		      TYPE_STAT_DATA, 3 );

	/* find proper place for inserting of stat data */
	retval = search_item(sb, &key, &path_to_key);
	if (retval == IO_ERROR) {
		err = -EIO;
		goto out_bad_inode;
	}
	if (retval == ITEM_FOUND) {
		pathrelse(&path_to_key);
		err = -EEXIST;
		goto out_bad_inode;
	}
	if (old_format_only(sb)) {
		/* uid/gid would not fit in the 16-bit fields of sd v1 */
		if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
			pathrelse(&path_to_key);
			err = -EINVAL;
			goto out_bad_inode;
		}
		inode2sd_v1(&sd, inode, inode->i_size);
	} else {
		inode2sd(&sd, inode, inode->i_size);
	}
	/*
	 * Record the key format all of this object's items will use:
	 * directories and symlinks keep the old (3.5) offset format
	 * even on a new-format fs.
	 */
	if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
	else
		set_inode_item_key_version(inode, KEY_FORMAT_3_6);
	if (old_format_only(sb))
		set_inode_sd_version(inode, STAT_DATA_V1);
	else
		set_inode_sd_version(inode, STAT_DATA_V2);

	/* insert the stat data into the tree */
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (REISERFS_I(dir)->new_packing_locality)
		th->displace_new_blocks = 1;
#endif
	retval =
	    reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
				 (char *)(&sd));
	if (retval) {
		err = retval;
		reiserfs_check_path(&path_to_key);
		goto out_bad_inode;
	}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (!th->displace_new_blocks)
		REISERFS_I(dir)->new_packing_locality = 0;
#endif
	if (S_ISDIR(mode)) {
		/* insert item with "." and ".." */
		retval =
		    reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
	}

	if (S_ISLNK(mode)) {
		/* insert body of symlink */
		if (!old_format_only(sb))
			i_size = ROUND_UP(i_size);
		retval =
		    reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
					 i_size);
	}
	if (retval) {
		err = retval;
		reiserfs_check_path(&path_to_key);
		journal_end(th);
		goto out_inserted_sd;
	}

	/*
	 * Objects in the private tree (or the privroot itself) never
	 * get xattrs of their own.
	 */
	if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
		inode->i_flags |= S_PRIVATE;
		inode->i_opflags &= ~IOP_XATTR;
	}

	/* inherit the default ACL; done without the fs write lock */
	if (reiserfs_posixacl(inode->i_sb)) {
		reiserfs_write_unlock(inode->i_sb);
		retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
		reiserfs_write_lock(inode->i_sb);
		if (retval) {
			err = retval;
			reiserfs_check_path(&path_to_key);
			journal_end(th);
			goto out_inserted_sd;
		}
	} else if (inode->i_sb->s_flags & SB_POSIXACL) {
		reiserfs_warning(inode->i_sb, "jdm-13090",
				 "ACLs aren't enabled in the fs, "
				 "but vfs thinks they are!");
	}

	/* write the LSM-provided security xattr, also without the lock */
	if (security->name) {
		reiserfs_write_unlock(inode->i_sb);
		retval = reiserfs_security_write(th, inode, security);
		reiserfs_write_lock(inode->i_sb);
		if (retval) {
			err = retval;
			reiserfs_check_path(&path_to_key);
			retval = journal_end(th);
			if (retval)
				err = retval;
			goto out_inserted_sd;
		}
	}

	reiserfs_update_sd(th, inode);
	reiserfs_check_path(&path_to_key);

	return 0;

out_bad_inode:
	/* wipe the key: nothing was inserted, iput must not delete */
	INODE_PKEY(inode)->k_objectid = 0;

	/* uncharge quota; done outside the fs lock as dquot_* may sleep */
	depth = reiserfs_write_unlock_nested(inode->i_sb);
	dquot_free_inode(inode);
	reiserfs_write_lock_nested(inode->i_sb, depth);

out_end_trans:
	journal_end(th);

	/*
	 * Drop quota references outside the transaction and the fs
	 * lock.
	 */
	depth = reiserfs_write_unlock_nested(inode->i_sb);
	dquot_drop(inode);
	reiserfs_write_lock_nested(inode->i_sb, depth);
	inode->i_flags |= S_NOQUOTA;
	make_bad_inode(inode);

out_inserted_sd:
	clear_nlink(inode);
	/* invalidate the handle so the caller cannot reuse it */
	th->t_trans_id = 0;
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);
	return err;
}
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
/*
 * Return, locked and referenced, the page and buffer_head covering the
 * last byte of the file so truncate can zero the unused tail of the
 * final block.  Returns 0 with *page_result/*bh_result set, -ENOENT
 * when i_size ends exactly on a block boundary (nothing to zero), or a
 * negative errno.  The caller guarantees inode->i_size > 0 (see
 * reiserfs_truncate_file).
 */
static int grab_tail_page(struct inode *inode,
			  struct page **page_result,
			  struct buffer_head **bh_result)
{
	/*
	 * We want the page holding the LAST byte of the file, not the
	 * page that would hold the next byte for appending.
	 */
	unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
	unsigned long pos = 0;
	unsigned long start = 0;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int error;

	/*
	 * If i_size % blocksize == 0 the file ends exactly on a block
	 * boundary and nothing needs converting or zeroing after a
	 * truncate.
	 */
	if ((offset & (blocksize - 1)) == 0) {
		return -ENOENT;
	}
	page = grab_cache_page(inode->i_mapping, index);
	error = -ENOMEM;
	if (!page) {
		goto out;
	}
	/* offset within the page of the last block of the file */
	start = (offset / blocksize) * blocksize;

	/* map/read the tail block without allocating anything new */
	error = __block_write_begin(page, start, offset - start,
				    reiserfs_get_block_create_0);
	if (error)
		goto unlock;

	/* walk the page's buffer ring to the buffer at 'start' */
	head = page_buffers(page);
	bh = head;
	do {
		if (pos >= start) {
			break;
		}
		bh = bh->b_this_page;
		pos += blocksize;
	} while (bh != head);

	if (!buffer_uptodate(bh)) {
		/*
		 * Should never happen: __block_write_begin is expected
		 * to have read the buffer in.  If it is not up to date,
		 * either the buffer-walk above or the write_begin call
		 * is wrong.
		 */
		reiserfs_error(inode->i_sb, "clm-6000",
			       "error reading block %lu", bh->b_blocknr);
		error = -EIO;
		goto unlock;
	}
	*bh_result = bh;
	*page_result = page;

out:
	return error;

unlock:
	unlock_page(page);
	put_page(page);
	return error;
}
2255
2256
2257
2258
2259
2260
2261
/*
 * Truncate the file down to inode->i_size.  Must NOT be called with a
 * transaction already running (journal_begin is done here).
 *
 * @update_timestamps: 1 for a real truncate (a "save link" is added so
 * that — presumably — an interrupted truncate can be completed after a
 * crash; TODO confirm against the save-link code), 0 when merely
 * cleaning up after a failed write (see reiserfs_truncate_failed_write).
 */
int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
{
	struct reiserfs_transaction_handle th;
	/* offset within the page of the first byte past the new EOF */
	unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned length;
	struct page *page = NULL;
	int error;
	struct buffer_head *bh = NULL;
	int err2;

	reiserfs_write_lock(inode->i_sb);

	if (inode->i_size > 0) {
		error = grab_tail_page(inode, &page, &bh);
		if (error) {
			/*
			 * -ENOENT means the new size is block aligned
			 * and there is no tail to zero — not an error.
			 */
			if (error != -ENOENT)
				reiserfs_error(inode->i_sb, "clm-6001",
					       "grab_tail_page failed %d",
					       error);
			page = NULL;
			bh = NULL;
		}
	}

	/*
	 * The transaction must cover the save-link insertion plus the
	 * tree balancing done by reiserfs_do_truncate, and one block
	 * for the stat-data update (JOURNAL_PER_BALANCE_CNT * 2 + 1).
	 */
	error = journal_begin(&th, inode->i_sb,
			      JOURNAL_PER_BALANCE_CNT * 2 + 1);
	if (error)
		goto out;
	reiserfs_update_inode_transaction(inode);
	if (update_timestamps)
		/* real truncate: record a save link for crash recovery */
		add_save_link(&th, inode, 1);
	err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
	error = journal_end(&th);
	if (error)
		goto out;

	/* check reiserfs_do_truncate after ending the transaction */
	if (err2) {
		error = err2;
		goto out;
	}

	if (update_timestamps) {
		error = remove_save_link(inode, 1 );
		if (error)
			goto out;
	}

	/* zero the now-unused tail of the last block, if any */
	if (page) {
		length = offset & (blocksize - 1);
		if (length) {
			length = blocksize - length;
			zero_user(page, offset, length);
			/* only dirty buffers that have a real disk block */
			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
				mark_buffer_dirty(bh);
			}
		}
		unlock_page(page);
		put_page(page);
	}

	reiserfs_write_unlock(inode->i_sb);

	return 0;
out:
	if (page) {
		unlock_page(page);
		put_page(page);
	}

	reiserfs_write_unlock(inode->i_sb);

	return error;
}
2362
/*
 * Map @block of bh_result's page for writeback.  An existing indirect
 * item yields the disk block number directly.  A direct item (file
 * tail) is handled by copying the page contents back into the tree
 * under a small transaction; the buffer is then left without a disk
 * mapping (b_blocknr == 0) and its dirty bit is cleared at the end,
 * since the data now lives in the tree.  Holes and missing items fall
 * back to reiserfs_get_block(GET_BLOCK_CREATE).
 */
static int map_block_for_writepage(struct inode *inode,
				   struct buffer_head *bh_result,
				   unsigned long block)
{
	struct reiserfs_transaction_handle th;
	int fs_gen;
	struct item_head tmp_ih;
	struct item_head *ih;
	struct buffer_head *bh;
	__le32 *item;
	struct cpu_key key;
	INITIALIZE_PATH(path);
	int pos_in_item;
	int jbegin_count = JOURNAL_PER_BALANCE_CNT;
	loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
	int retval;
	int use_get_block = 0;
	int bytes_copied = 0;
	int copy_size;
	int trans_running = 0;

	/*
	 * Catch places below that try to log something without
	 * starting a transaction first.
	 */
	th.t_trans_id = 0;

	if (!buffer_uptodate(bh_result)) {
		return -EIO;
	}

	kmap(bh_result->b_page);
start_over:
	reiserfs_write_lock(inode->i_sb);
	make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);

research:
	retval = search_for_position_by_key(inode->i_sb, &key, &path);
	if (retval != POSITION_FOUND) {
		/* no item covers this offset: let get_block allocate */
		use_get_block = 1;
		goto out;
	}

	bh = get_last_bh(&path);
	ih = tp_item_head(&path);
	item = tp_item_body(&path);
	pos_in_item = path.pos_in_item;

	/* we've found an unformatted node */
	if (indirect_item_found(retval, ih)) {
		if (bytes_copied > 0) {
			reiserfs_warning(inode->i_sb, "clm-6002",
					 "bytes_copied %d", bytes_copied);
		}
		if (!get_block_num(item, pos_in_item)) {
			/* zero pointer: we are writing into a hole */
			use_get_block = 1;
			goto out;
		}
		set_block_dev_mapped(bh_result,
				     get_block_num(item, pos_in_item), inode);
	} else if (is_direct_le_ih(ih)) {
		/* tail: copy the page data back into the direct item */
		char *p;
		p = page_address(bh_result->b_page);
		p += (byte_offset - 1) & (PAGE_SIZE - 1);
		copy_size = ih_item_len(ih) - pos_in_item;

		fs_gen = get_generation(inode->i_sb);
		copy_item_head(&tmp_ih, ih);

		if (!trans_running) {
			/* start the transaction lazily, only for tails */
			retval = journal_begin(&th, inode->i_sb, jbegin_count);
			if (retval)
				goto out;
			reiserfs_update_inode_transaction(inode);
			trans_running = 1;
			/* journal_begin may have slept; recheck the tree */
			if (fs_changed(fs_gen, inode->i_sb)
			    && item_moved(&tmp_ih, &path)) {
				reiserfs_restore_prepared_buffer(inode->i_sb,
								 bh);
				goto research;
			}
		}

		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);

		/* same recheck after prepare_for_journal */
		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
			goto research;
		}

		memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
		       copy_size);

		journal_mark_dirty(&th, bh);
		bytes_copied += copy_size;
		set_block_dev_mapped(bh_result, 0, inode);

		/* tail may span items: are there still bytes left? */
		if (bytes_copied < bh_result->b_size &&
		    (byte_offset + bytes_copied) < inode->i_size) {
			set_cpu_key_k_offset(&key,
					     cpu_key_k_offset(&key) +
					     copy_size);
			goto research;
		}
	} else {
		reiserfs_warning(inode->i_sb, "clm-6003",
				 "bad item inode %lu", inode->i_ino);
		retval = -EIO;
		goto out;
	}
	retval = 0;

out:
	pathrelse(&path);
	if (trans_running) {
		int err = journal_end(&th);
		if (err)
			retval = err;
		trans_running = 0;
	}
	reiserfs_write_unlock(inode->i_sb);

	/* this is where we fill in holes in the file */
	if (use_get_block) {
		retval = reiserfs_get_block(inode, block, bh_result,
					    GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
					    | GET_BLOCK_NO_DANGLE);
		if (!retval) {
			if (!buffer_mapped(bh_result)
			    || bh_result->b_blocknr == 0) {
				/* get_block produced a tail; redo the walk */
				use_get_block = 0;
				goto start_over;
			}
		}
	}
	kunmap(bh_result->b_page);

	if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
		/*
		 * We've copied the page data into the direct item, so
		 * the buffer in the page is now clean; reflect that so
		 * nobody tries to write it to a nonexistent block.
		 */
		lock_buffer(bh_result);
		clear_buffer_dirty(bh_result);
		unlock_buffer(bh_result);
	}
	return retval;
}
2516
2517
2518
2519
2520
2521
/*
 * Workhorse of ->writepage: write one page to disk.  Handles pages
 * with tail buffers (no disk mapping), pages straddling/past EOF, and
 * "checked" pages whose buffers must be logged through the journal
 * (PageChecked) instead of written directly.
 */
static int reiserfs_write_full_page(struct page *page,
				    struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
	int error = 0;
	unsigned long block;
	sector_t last_block;
	struct buffer_head *head, *bh;
	int partial = 0;
	int nr = 0;
	int checked = PageChecked(page);
	struct reiserfs_transaction_handle th;
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_SIZE / s->s_blocksize;
	th.t_trans_id = 0;

	/* no journaling from memory reclaim: just redirty and bail */
	if (checked && (current->flags & PF_MEMALLOC)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	/*
	 * If the page has no buffers yet, attach them pre-marked dirty
	 * and uptodate so the mapping loop below processes them all.
	 */
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, s->s_blocksize,
				     (1 << BH_Dirty) | (1 << BH_Uptodate));
	}
	head = page_buffers(page);

	/*
	 * Last-page handling: a page entirely past EOF is skipped; the
	 * page containing EOF gets its region past EOF zeroed.
	 */
	if (page->index >= end_index) {
		unsigned last_offset;

		last_offset = inode->i_size & (PAGE_SIZE - 1);
		/* no file contents in this page (current file size) */
		if (page->index >= end_index + 1 || !last_offset) {
			unlock_page(page);
			return 0;
		}
		zero_user_segment(page, last_offset, PAGE_SIZE);
	}
	bh = head;
	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
	/* first pass: make sure every buffer we will write is mapped */
	do {
		if (block > last_block) {
			/*
			 * Buffer beyond EOF: nothing to write.  Mark it
			 * clean and uptodate so it is ignored below.
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((checked || buffer_dirty(bh)) &&
			   (!buffer_mapped(bh) || bh->b_blocknr == 0)) {
			/*
			 * Unmapped dirty buffer (hole or tail): get a
			 * mapping, or copy the tail back into the tree.
			 */
			if ((error = map_block_for_writepage(inode, bh, block))) {
				goto fail;
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	/*
	 * Checked pages log their data through the journal rather than
	 * writing it in place; open a transaction big enough for every
	 * buffer on the page.
	 */
	if (checked) {
		ClearPageChecked(page);
		reiserfs_write_lock(s);
		error = journal_begin(&th, s, bh_per_page + 1);
		if (error) {
			reiserfs_write_unlock(s);
			goto fail;
		}
		reiserfs_update_inode_transaction(inode);
	}

	/* second pass: queue buffers for writing (or log them) */
	do {
		get_bh(bh);
		if (!buffer_mapped(bh))
			continue;
		/* tail buffers (blocknr 0) carry no on-disk data */
		if (buffer_mapped(bh) && bh->b_blocknr == 0)
			continue;

		if (checked) {
			reiserfs_prepare_for_journal(s, bh, 1);
			journal_mark_dirty(&th, bh);
			continue;
		}

		/*
		 * For integrity writeback we must wait for a busy
		 * buffer; otherwise just skip it and redirty the page.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else {
			if (!trylock_buffer(bh)) {
				redirty_page_for_writepage(wbc, page);
				continue;
			}
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (checked) {
		error = journal_end(&th);
		reiserfs_write_unlock(s);
		if (error)
			goto fail;
	}
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	/*
	 * Submit everything tagged async_write above.  nr counts the
	 * submissions so we know whether I/O completion will end the
	 * page writeback for us.
	 */
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, 0, bh);
			nr++;
		}
		put_bh(bh);
		bh = next;
	} while (bh != head);

	error = 0;
done:
	if (nr == 0) {
		/*
		 * Nothing was submitted, so no I/O completion will end
		 * writeback; do it here, and set PageUptodate if every
		 * buffer is.
		 */
		bh = head;
		do {
			if (!buffer_uptodate(bh)) {
				partial = 1;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (!partial)
			SetPageUptodate(page);
		end_page_writeback(page);
	}
	return error;

fail:
	/*
	 * Mapping a buffer failed.  Still write out any buffers that do
	 * have a disk mapping and dirty data, drop the rest, and flag
	 * the page as errored.
	 */
	ClearPageUptodate(page);
	bh = head;
	do {
		get_bh(bh);
		if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * Unmapped or clean buffer: clear dirty so it
			 * is not picked up again.
			 */
			clear_buffer_dirty(bh);
		}
		bh = bh->b_this_page;
	} while (bh != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(REQ_OP_WRITE, 0, bh);
			nr++;
		}
		put_bh(bh);
		bh = next;
	} while (bh != head);
	goto done;
}
2735
/* ->readpage: generic buffer-head read using reiserfs_get_block */
static int reiserfs_readpage(struct file *f, struct page *page)
{
	return block_read_full_page(page, reiserfs_get_block);
}
2740
/*
 * ->writepage: wait out any filesystem-wide write block (see
 * reiserfs_wait_on_write_block), then write the page.
 */
static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	reiserfs_wait_on_write_block(inode->i_sb);
	return reiserfs_write_full_page(page, wbc);
}
2747
/*
 * Undo a failed or short write: drop page-cache pages beyond i_size
 * and truncate the tree back, without updating timestamps (second
 * argument 0 to reiserfs_truncate_file).
 */
static void reiserfs_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	reiserfs_truncate_file(inode, 0);
}
2753
/*
 * ->write_begin: grab and prepare the page for a write of @len bytes
 * at @pos, allocating blocks via reiserfs_get_block as needed.
 */
static int reiserfs_write_begin(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode;
	struct page *page;
	pgoff_t index;
	int ret;
	int old_ref = 0;

	inode = mapping->host;
	*fsdata = NULL;
	/*
	 * cont_expand case: nudge pos off the block boundary; fsdata
	 * records the flag so reiserfs_write_end applies the matching
	 * adjustment.  NOTE(review): presumably needed so the block at
	 * the boundary is actually prepared — confirm.
	 */
	if (flags & AOP_FLAG_CONT_EXPAND &&
	    (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
		pos ++;
		*fsdata = (void *)(unsigned long)flags;
	}

	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	reiserfs_wait_on_write_block(inode->i_sb);
	fix_tail_page_for_writing(page);
	/*
	 * If a persistent transaction is already running, bump its
	 * refcount so reiserfs_get_block nests inside it.
	 */
	if (reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th;
		th = (struct reiserfs_transaction_handle *)current->
		    journal_info;
		BUG_ON(!th->t_refcount);
		BUG_ON(!th->t_trans_id);
		old_ref = th->t_refcount;
		th->t_refcount++;
	}
	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
	if (ret && reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th = current->journal_info;
		/*
		 * On failure, undo the refcount bump.  If
		 * reiserfs_get_block itself left a new persistent
		 * transaction behind (refcount grew and we had none
		 * before), it must be ended here since write_end will
		 * never run.
		 */
		if (th->t_refcount > old_ref) {
			if (old_ref)
				th->t_refcount--;
			else {
				int err;
				reiserfs_write_lock(inode->i_sb);
				err = reiserfs_end_persistent_transaction(th);
				reiserfs_write_unlock(inode->i_sb);
				if (err)
					ret = err;
			}
		}
	}
	if (ret) {
		unlock_page(page);
		put_page(page);
		/* Truncate allocated blocks */
		reiserfs_truncate_failed_write(inode);
	}
	return ret;
}
2827
/*
 * Prepare @page for an in-place write of @len bytes at @from.  Same
 * transaction-refcount handling as reiserfs_write_begin, but the
 * caller already holds the page and the reiserfs write lock (which is
 * dropped around the wait).
 */
int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
{
	struct inode *inode = page->mapping->host;
	int ret;
	int old_ref = 0;
	int depth;

	depth = reiserfs_write_unlock_nested(inode->i_sb);
	reiserfs_wait_on_write_block(inode->i_sb);
	reiserfs_write_lock_nested(inode->i_sb, depth);

	fix_tail_page_for_writing(page);
	/* nest inside an already-running persistent transaction */
	if (reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th;
		th = (struct reiserfs_transaction_handle *)current->
		    journal_info;
		BUG_ON(!th->t_refcount);
		BUG_ON(!th->t_trans_id);
		old_ref = th->t_refcount;
		th->t_refcount++;
	}

	ret = __block_write_begin(page, from, len, reiserfs_get_block);
	if (ret && reiserfs_transaction_running(inode->i_sb)) {
		struct reiserfs_transaction_handle *th = current->journal_info;
		/*
		 * Failure: undo the refcount bump; if reiserfs_get_block
		 * started a fresh persistent transaction (no old_ref),
		 * it must be ended here since commit will never run.
		 */
		if (th->t_refcount > old_ref) {
			if (old_ref)
				th->t_refcount--;
			else {
				int err;
				reiserfs_write_lock(inode->i_sb);
				err = reiserfs_end_persistent_transaction(th);
				reiserfs_write_unlock(inode->i_sb);
				if (err)
					ret = err;
			}
		}
	}
	return ret;

}
2882
/* ->bmap: map a file block to a device block via reiserfs_bmap */
static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
{
	return generic_block_bmap(as, block, reiserfs_bmap);
}
2887
/*
 * ->write_end: commit @copied bytes written at @pos, growing i_size
 * (inside a small dedicated transaction) when the write extends the
 * file, and finishing any persistent transaction left running by
 * reiserfs_get_block.  Returns bytes committed or a negative errno.
 */
static int reiserfs_write_end(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	int ret = 0;
	int update_sd = 0;
	struct reiserfs_transaction_handle *th;
	unsigned start;
	bool locked = false;

	/* matching adjustment to the pos++ in reiserfs_write_begin */
	if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
		pos ++;

	reiserfs_wait_on_write_block(inode->i_sb);
	if (reiserfs_transaction_running(inode->i_sb))
		th = current->journal_info;
	else
		th = NULL;

	start = pos & (PAGE_SIZE - 1);
	/* short copy: zero the buffers we prepared but didn't fill */
	if (unlikely(copied < len)) {
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start + copied, start + len);
	}
	flush_dcache_page(page);

	reiserfs_commit_page(inode, page, start, start + copied);

	/*
	 * Generic routines would normally update i_size for us, but
	 * reiserfs needs the stat-data update journaled, so do it in a
	 * one-block transaction of our own.
	 */
	if (pos + copied > inode->i_size) {
		struct reiserfs_transaction_handle myth;
		reiserfs_write_lock(inode->i_sb);
		locked = true;
		/*
		 * Once the file is big enough, drop the pack-on-close
		 * hint: repacking tails of large files on close is not
		 * worth it.
		 */
		if ((have_large_tails(inode->i_sb)
		     && inode->i_size > i_block_size(inode) * 4)
		    || (have_small_tails(inode->i_sb)
			&& inode->i_size > i_block_size(inode)))
			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

		ret = journal_begin(&myth, inode->i_sb, 1);
		if (ret)
			goto journal_error;

		reiserfs_update_inode_transaction(inode);
		inode->i_size = pos + copied;
		/*
		 * Mark the inode dirty as well so generic writeback
		 * knows about the change, then journal the stat data.
		 */
		mark_inode_dirty(inode);
		reiserfs_update_sd(&myth, inode);
		update_sd = 1;
		ret = journal_end(&myth);
		if (ret)
			goto journal_error;
	}
	/* close a persistent transaction started in write_begin */
	if (th) {
		if (!locked) {
			reiserfs_write_lock(inode->i_sb);
			locked = true;
		}
		if (!update_sd)
			mark_inode_dirty(inode);
		ret = reiserfs_end_persistent_transaction(th);
		if (ret)
			goto out;
	}

out:
	if (locked)
		reiserfs_write_unlock(inode->i_sb);
	unlock_page(page);
	put_page(page);

	/* short write past EOF: trim back the blocks we allocated */
	if (pos + len > inode->i_size)
		reiserfs_truncate_failed_write(inode);

	return ret == 0 ? copied : ret;

journal_error:
	reiserfs_write_unlock(inode->i_sb);
	locked = false;
	if (th) {
		if (!update_sd)
			reiserfs_update_sd(th, inode);
		ret = reiserfs_end_persistent_transaction(th);
	}
	goto out;
}
2990
/*
 * Older-style commit helper (declared at the top of the file), used
 * with __reiserfs_write_begin: commit the bytes between @from and @to
 * on @page, updating i_size under its own transaction when the write
 * grew the file, and finishing any running persistent transaction.
 * Caller holds the reiserfs write lock (dropped around the wait).
 */
int reiserfs_commit_write(struct file *f, struct page *page,
			  unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
	int ret = 0;
	int update_sd = 0;
	struct reiserfs_transaction_handle *th = NULL;
	int depth;

	depth = reiserfs_write_unlock_nested(inode->i_sb);
	reiserfs_wait_on_write_block(inode->i_sb);
	reiserfs_write_lock_nested(inode->i_sb, depth);

	if (reiserfs_transaction_running(inode->i_sb)) {
		th = current->journal_info;
	}
	reiserfs_commit_page(inode, page, from, to);

	/*
	 * i_size update must be journaled, so it is done here in a
	 * one-block transaction rather than by generic code (same
	 * pattern as reiserfs_write_end).
	 */
	if (pos > inode->i_size) {
		struct reiserfs_transaction_handle myth;
		/*
		 * Drop the pack-on-close hint once the file is large;
		 * repacking big files on close is not worthwhile.
		 */
		if ((have_large_tails(inode->i_sb)
		     && inode->i_size > i_block_size(inode) * 4)
		    || (have_small_tails(inode->i_sb)
			&& inode->i_size > i_block_size(inode)))
			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

		ret = journal_begin(&myth, inode->i_sb, 1);
		if (ret)
			goto journal_error;

		reiserfs_update_inode_transaction(inode);
		inode->i_size = pos;
		/*
		 * Dirty the inode for generic writeback as well, then
		 * journal the stat data.
		 */
		mark_inode_dirty(inode);
		reiserfs_update_sd(&myth, inode);
		update_sd = 1;
		ret = journal_end(&myth);
		if (ret)
			goto journal_error;
	}
	if (th) {
		if (!update_sd)
			mark_inode_dirty(inode);
		ret = reiserfs_end_persistent_transaction(th);
		if (ret)
			goto out;
	}

out:
	return ret;

journal_error:
	if (th) {
		if (!update_sd)
			reiserfs_update_sd(th, inode);
		ret = reiserfs_end_persistent_transaction(th);
	}

	return ret;
}
3066
3067void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
3068{
3069 if (reiserfs_attrs(inode->i_sb)) {
3070 if (sd_attrs & REISERFS_SYNC_FL)
3071 inode->i_flags |= S_SYNC;
3072 else
3073 inode->i_flags &= ~S_SYNC;
3074 if (sd_attrs & REISERFS_IMMUTABLE_FL)
3075 inode->i_flags |= S_IMMUTABLE;
3076 else
3077 inode->i_flags &= ~S_IMMUTABLE;
3078 if (sd_attrs & REISERFS_APPEND_FL)
3079 inode->i_flags |= S_APPEND;
3080 else
3081 inode->i_flags &= ~S_APPEND;
3082 if (sd_attrs & REISERFS_NOATIME_FL)
3083 inode->i_flags |= S_NOATIME;
3084 else
3085 inode->i_flags &= ~S_NOATIME;
3086 if (sd_attrs & REISERFS_NOTAIL_FL)
3087 REISERFS_I(inode)->i_flags |= i_nopack_mask;
3088 else
3089 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
3090 }
3091}
3092
3093
3094
3095
3096
/*
 * Decide whether @bh may be unmapped during page invalidation without
 * upsetting the journal.  Returns 1 when the buffer can be dropped (any
 * journal head attached to it is freed first), 0 when the journal still
 * needs it.  The check runs with the buffer locked and the journal's
 * j_dirty_buffers_lock held.
 */
static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
{
	int ret = 1;
	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);

	lock_buffer(bh);
	spin_lock(&j->j_dirty_buffers_lock);
	if (!buffer_mapped(bh)) {
		goto free_jh;
	}
	/*
	 * Data-journaled file: a buffer that is journaled or journal-dirty
	 * is still owned by a transaction and must not be unmapped here.
	 */
	if (reiserfs_file_data_log(inode)) {
		/*
		 * (buffers attached to a live transaction keep the page
		 * pinned until the transaction commits)
		 */
		if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
			ret = 0;
		}
	} else if (buffer_dirty(bh)) {
		struct reiserfs_journal_list *jl;
		struct reiserfs_jh *jh = bh->b_private;

		/*
		 * A dirty data buffer whose journal head points at a
		 * journal list other than the current one is queued for
		 * write-out by an older, still-committing transaction
		 * (data=ordered); dropping it now could lose ordering
		 * guarantees, so refuse.  Buffers tied to the *current*
		 * transaction list are safe to drop.
		 */
		if (jh && (jl = jh->jl)
		    && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
			ret = 0;
	}
free_jh:
	/* Droppable: detach the journal head before the caller unmaps. */
	if (ret && bh->b_private) {
		reiserfs_free_jh(bh);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	unlock_buffer(bh);
	return ret;
}
3149
3150
/* invalidatepage: drop the buffers in [offset, offset+length) of @page */
static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	struct inode *inode = page->mapping->host;
	unsigned int curr_off = 0;
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	int ret = 1;	/* stays 1 only if every buffer in range was dropped */

	BUG_ON(!PageLocked(page));

	/* Whole-page invalidation: the data-log "checked" tag is obsolete. */
	if (!partial_page)
		ClearPageChecked(page);

	if (!page_has_buffers(page))
		goto out;

	/* Walk the circular buffer list attached to the page. */
	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/* Past the invalidated range: nothing further to do. */
		if (next_off > stop)
			goto out;

		/*
		 * Only buffers lying entirely inside the range are
		 * candidates; the journal gets a veto via
		 * invalidatepage_can_drop().
		 */
		if (offset <= curr_off) {
			if (invalidatepage_can_drop(inode, bh))
				reiserfs_unmap_buffer(bh);
			else
				ret = 0;
		}
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * The whole page was invalidated and every buffer could be
	 * dropped: try to free the buffer heads so the page can leave
	 * the page cache.  (Best effort; failure is fine.)
	 */
	if (!partial_page && ret) {
		ret = try_to_release_page(page, 0);
		/* maybe should BUG_ON(!ret); - not yet sure */
	}
out:
	return;
}
3203
3204static int reiserfs_set_page_dirty(struct page *page)
3205{
3206 struct inode *inode = page->mapping->host;
3207 if (reiserfs_file_data_log(inode)) {
3208 SetPageChecked(page);
3209 return __set_page_dirty_nobuffers(page);
3210 }
3211 return __set_page_dirty_buffers(page);
3212}
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3224{
3225 struct inode *inode = page->mapping->host;
3226 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3227 struct buffer_head *head;
3228 struct buffer_head *bh;
3229 int ret = 1;
3230
3231 WARN_ON(PageChecked(page));
3232 spin_lock(&j->j_dirty_buffers_lock);
3233 head = page_buffers(page);
3234 bh = head;
3235 do {
3236 if (bh->b_private) {
3237 if (!buffer_dirty(bh) && !buffer_locked(bh)) {
3238 reiserfs_free_jh(bh);
3239 } else {
3240 ret = 0;
3241 break;
3242 }
3243 }
3244 bh = bh->b_this_page;
3245 } while (bh != head);
3246 if (ret)
3247 ret = try_to_free_buffers(page);
3248 spin_unlock(&j->j_dirty_buffers_lock);
3249 return ret;
3250}
3251
3252
3253
3254
3255
3256static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3257{
3258 struct file *file = iocb->ki_filp;
3259 struct inode *inode = file->f_mapping->host;
3260 size_t count = iov_iter_count(iter);
3261 ssize_t ret;
3262
3263 ret = blockdev_direct_IO(iocb, inode, iter,
3264 reiserfs_get_blocks_direct_io);
3265
3266
3267
3268
3269
3270 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
3271 loff_t isize = i_size_read(inode);
3272 loff_t end = iocb->ki_pos + count;
3273
3274 if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
3275 truncate_setsize(inode, isize);
3276 reiserfs_vfs_truncate_file(inode);
3277 }
3278 }
3279
3280 return ret;
3281}
3282
/*
 * reiserfs ->setattr: validate and apply inode attribute changes
 * (size, owner, mode, times).  Size changes and quota-relevant owner
 * changes are done under the reiserfs write lock and a journal
 * transaction; the generic attribute copy happens last.
 */
int reiserfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		     struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid;
	int error;

	/* NOTE(review): checks use &init_user_ns, not mnt_userns —
	 * presumably idmapped mounts are not supported here; confirm. */
	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	/* must be turned off for recursive notify_change calls */
	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	reiserfs_write_lock(inode->i_sb);
	if (attr->ia_valid & ATTR_SIZE) {
		/*
		 * Old (3.5 key format) stat data cannot represent file
		 * sizes beyond MAX_NON_LFS; refuse larger sizes rather
		 * than corrupting the size field.
		 */
		if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
		    attr->ia_size > MAX_NON_LFS) {
			reiserfs_write_unlock(inode->i_sb);
			error = -EFBIG;
			goto out;
		}

		inode_dio_wait(inode);

		/* fill in hole pointers in the expanding truncate case. */
		if (attr->ia_size > inode->i_size) {
			error = generic_cont_expand_simple(inode, attr->ia_size);
			if (REISERFS_I(inode)->i_prealloc_count > 0) {
				int err;
				struct reiserfs_transaction_handle th;

				/* we're changing at most 2 bitmaps, inode + super */
				err = journal_begin(&th, inode->i_sb, 4);
				if (!err) {
					reiserfs_discard_prealloc(&th, inode);
					err = journal_end(&th);
				}
				if (err)
					error = err;
			}
			if (error) {
				reiserfs_write_unlock(inode->i_sb);
				goto out;
			}
			/*
			 * file size is changed, ctime and mtime are
			 * to be updated
			 */
			attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
		}
	}
	reiserfs_write_unlock(inode->i_sb);

	/* V1 stat data holds 16-bit uid/gid; reject ids that don't fit. */
	if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
	     ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
	    (get_inode_sd_version(inode) == STAT_DATA_V1)) {
		/* stat data of format v3.5 has 16 bit uid and gid */
		error = -EINVAL;
		goto out;
	}

	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		struct reiserfs_transaction_handle th;
		/* quota init + delete credits for both uid and gid, plus
		 * the inode update itself */
		int jbegin_count =
		    2 *
		    (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
		     REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
		    2;

		error = reiserfs_chown_xattrs(inode, attr);

		if (error)
			return error;

		/*
		 * (user+group)*(old+new) structure - we count quota
		 * info and , inode write (sb, inode)
		 */
		reiserfs_write_lock(inode->i_sb);
		error = journal_begin(&th, inode->i_sb, jbegin_count);
		reiserfs_write_unlock(inode->i_sb);
		if (error)
			goto out;
		error = dquot_transfer(inode, attr);
		reiserfs_write_lock(inode->i_sb);
		if (error) {
			journal_end(&th);
			reiserfs_write_unlock(inode->i_sb);
			goto out;
		}

		/*
		 * Update corresponding info in inode so that everything
		 * is in one transaction
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		mark_inode_dirty(inode);
		error = journal_end(&th);
		reiserfs_write_unlock(inode->i_sb);
		if (error)
			goto out;
	}

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = inode_newsize_ok(inode, attr->ia_size);
		if (!error) {
			/*
			 * Hold the tailpack mutex across the truncate so
			 * tail conversion cannot race with us.
			 */
			mutex_lock(&REISERFS_I(inode)->tailpack);
			truncate_setsize(inode, attr->ia_size);
			reiserfs_truncate_file(inode, 1);
			mutex_unlock(&REISERFS_I(inode)->tailpack);
		}
	}

	if (!error) {
		setattr_copy(&init_user_ns, inode, attr);
		mark_inode_dirty(inode);
	}

	/* Mode change: rewrite the ACL to match the new permission bits. */
	if (!error && reiserfs_posixacl(inode->i_sb)) {
		if (attr->ia_valid & ATTR_MODE)
			error = reiserfs_acl_chmod(inode);
	}

out:
	return error;
}
3427
/*
 * Address-space operations for reiserfs data pages: wires the
 * page-cache entry points (writeback, readahead, write_begin/write_end,
 * invalidation, direct I/O, dirtying) to the reiserfs implementations.
 */
const struct address_space_operations reiserfs_address_space_operations = {
	.writepage = reiserfs_writepage,
	.readpage = reiserfs_readpage,
	.readahead = reiserfs_readahead,
	.releasepage = reiserfs_releasepage,
	.invalidatepage = reiserfs_invalidatepage,
	.write_begin = reiserfs_write_begin,
	.write_end = reiserfs_write_end,
	.bmap = reiserfs_aop_bmap,
	.direct_IO = reiserfs_direct_IO,
	.set_page_dirty = reiserfs_set_page_dirty,
};
3440