/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <linux/aio.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned
 * long is 32 bits, as larger files cannot be represented in the page cache
 * page index there.
 *
 * Return -EOVERFLOW if the file is too large, otherwise whatever
 * generic_file_open() returns.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of the attribute described by the ntfs inode
 * @ni to @new_init_size bytes, zeroing the newly initialized region.
 *
 * For a resident attribute the value is zeroed directly in the mft record and
 * both the initialized size and i_size become @new_init_size.
 *
 * For a non-resident attribute the data size in the mft record (and i_size)
 * is extended first if @new_init_size exceeds it, then every page between the
 * old and the new initialized size is read in and marked dirty, and finally
 * the new initialized size is written back to the attribute record.
 *
 * Return 0 on success and -errno on error.  On error the in-memory
 * initialized size is restored to its old value.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;

	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);

	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);

	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size exceeds the data size, extend the data
	 * size (and hence i_size) in the mft record first, so that the
	 * initialized size never exceeds the data size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);

		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not uptodate, this zeroes
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Advance the initialized size in the ntfs inode to cover
		 * this page before marking the page dirty.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);

		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  We may
		 * be dirtying a large number of pages here, so throttle dirty
		 * page generation and give other tasks a chance to run before
		 * moving on to the next page.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);

	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}

/*
 * ntfs_fault_in_pages_readable - fault in a user buffer for reading
 *
 * Fault in the user space pages covering @uaddr to @uaddr + @bytes so that a
 * later atomic copy from them is unlikely to fault.  Unlike
 * fault_in_pages_readable(), this copes with buffers spanning more than two
 * pages.  The loop reads one byte from each page and stops early as soon as
 * __get_user() fails.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}

/*
 * ntfs_fault_in_pages_readable_iovec - fault in an iovec array for reading
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs,
 * starting @iov_ofs bytes into the first one.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping,
 * starting at index @index.  Pages not yet in the page cache are allocated
 * (reusing *@cached_page when one is available) and added to the cache and
 * the lru list.
 *
 * The page locks are obtained in ascending page index order.  On error all
 * pages obtained so far are unlocked and released again.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping, index,
					GFP_KERNEL);
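			/*
			 * -EEXIST means another task added a page at this
			 * index in the meantime; simply retry the lookup for
			 * the same index.
			 */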
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
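	/*
	 * Lock the buffer and take an extra reference; the reference is
	 * dropped by end_buffer_read_sync() when the read completes.
	 */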
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  The pages are
 * locked but not kmap()ped and the user data has not yet been copied into
 * them.
 *
 * Fill any holes in the written range with allocated clusters, create and map
 * buffers for all the pages as needed, and read in (or zero) any buffers that
 * are only partially covered by the write so that no stale or uninitialized
 * data remains.
 *
 * If @nr_pages is greater than one, the cluster size is greater than
 * PAGE_CACHE_SIZE and the pages make up exactly one (sparse) cluster which
 * needs to be allocated.
 *
 * Return 0 on success and -errno on error, in which case the changes made so
 * far are rolled back as far as possible.
 */
481static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
482 unsigned nr_pages, s64 pos, size_t bytes)
483{
484 VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
485 LCN lcn;
486 s64 bh_pos, vcn_len, end, initialized_size;
487 sector_t lcn_block;
488 struct page *page;
489 struct inode *vi;
490 ntfs_inode *ni, *base_ni = NULL;
491 ntfs_volume *vol;
492 runlist_element *rl, *rl2;
493 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
494 ntfs_attr_search_ctx *ctx = NULL;
495 MFT_RECORD *m = NULL;
496 ATTR_RECORD *a = NULL;
497 unsigned long flags;
498 u32 attr_rec_len = 0;
499 unsigned blocksize, u;
500 int err, mp_size;
501 bool rl_write_locked, was_hole, is_retry;
502 unsigned char blocksize_bits;
503 struct {
504 u8 runlist_merged:1;
505 u8 mft_attr_mapped:1;
506 u8 mp_rebuilt:1;
507 u8 attr_switched:1;
508 } status = { 0, 0, 0, 0 };
509
510 BUG_ON(!nr_pages);
511 BUG_ON(!pages);
512 BUG_ON(!*pages);
513 vi = pages[0]->mapping->host;
514 ni = NTFS_I(vi);
515 vol = ni->vol;
516 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
517 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
518 vi->i_ino, ni->type, pages[0]->index, nr_pages,
519 (long long)pos, bytes);
520 blocksize = vol->sb->s_blocksize;
521 blocksize_bits = vol->sb->s_blocksize_bits;
522 u = 0;
523 do {
524 page = pages[u];
525 BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers
		 * if the page is uptodate/dirty.
		 */
530 if (!page_has_buffers(page)) {
531 create_empty_buffers(page, blocksize, 0);
532 if (unlikely(!page_has_buffers(page)))
533 return -ENOMEM;
534 }
535 } while (++u < nr_pages);
536 rl_write_locked = false;
537 rl = NULL;
538 err = 0;
539 vcn = lcn = -1;
540 vcn_len = 0;
541 lcn_block = -1;
542 was_hole = false;
543 cpos = pos >> vol->cluster_size_bits;
544 end = pos + bytes;
545 cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto
	 * to reduce indentation.
	 */
550 u = 0;
551do_next_page:
552 page = pages[u];
553 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
554 bh = head = page_buffers(page);
555 do {
556 VCN cdelta;
557 s64 bh_end;
558 unsigned bh_cofs;
559
560
561 if (buffer_new(bh))
562 clear_buffer_new(bh);
563 bh_end = bh_pos + blocksize;
564 bh_cpos = bh_pos >> vol->cluster_size_bits;
565 bh_cofs = bh_pos & vol->cluster_size_mask;
566 if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
571 if (buffer_uptodate(bh))
572 continue;
			/*
			 * The buffer is not uptodate.  If the page is
			 * uptodate, the buffer must be uptodate, too; set it
			 * and move on.
			 */
577 if (PageUptodate(page)) {
578 set_buffer_uptodate(bh);
579 continue;
580 }
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
586 if ((bh_pos < pos && bh_end > pos) ||
587 (bh_pos < end && bh_end > end)) {
588
589
590
591
592
593 read_lock_irqsave(&ni->size_lock, flags);
594 initialized_size = ni->initialized_size;
595 read_unlock_irqrestore(&ni->size_lock, flags);
596 if (bh_pos < initialized_size) {
597 ntfs_submit_bh_for_read(bh);
598 *wait_bh++ = bh;
599 } else {
600 zero_user(page, bh_offset(bh),
601 blocksize);
602 set_buffer_uptodate(bh);
603 }
604 }
605 continue;
606 }
607
608 bh->b_bdev = vol->sb->s_bdev;
		/*
		 * The buffer is not mapped.  If the starting lcn of the run
		 * containing this buffer is already cached (from a previous
		 * buffer), map the buffer from the cache without looking up
		 * the runlist again.
		 */
617 cdelta = bh_cpos - vcn;
618 if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
619map_buffer_cached:
620 BUG_ON(lcn < 0);
621 bh->b_blocknr = lcn_block +
622 (cdelta << (vol->cluster_size_bits -
623 blocksize_bits)) +
624 (bh_cofs >> blocksize_bits);
625 set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  If we
			 * allocated the buffer but are not marking it dirty,
			 * we set buffer_new so proper cleanup can be done on
			 * error.
			 */
635 if (PageUptodate(page)) {
636 if (!buffer_uptodate(bh))
637 set_buffer_uptodate(bh);
638 if (unlikely(was_hole)) {
639
640 unmap_underlying_metadata(bh->b_bdev,
641 bh->b_blocknr);
642 if (bh_end <= pos || bh_pos >= end)
643 mark_buffer_dirty(bh);
644 else
645 set_buffer_new(bh);
646 }
647 continue;
648 }
649
650 if (likely(!was_hole)) {
651
652
653
654
655
656
657 if (!buffer_uptodate(bh) && bh_pos < end &&
658 bh_end > pos &&
659 (bh_pos < pos ||
660 bh_end > end)) {
661
662
663
664
665
666
667 read_lock_irqsave(&ni->size_lock,
668 flags);
669 initialized_size = ni->initialized_size;
670 read_unlock_irqrestore(&ni->size_lock,
671 flags);
672 if (bh_pos < initialized_size) {
673 ntfs_submit_bh_for_read(bh);
674 *wait_bh++ = bh;
675 } else {
676 zero_user(page, bh_offset(bh),
677 blocksize);
678 set_buffer_uptodate(bh);
679 }
680 }
681 continue;
682 }
683
684 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * The buffer is in a hole that has just been filled
			 * with a newly allocated cluster.  If the buffer is
			 * fully outside the write, zero it if necessary, mark
			 * it uptodate and dirty so the new cluster contents
			 * get written to disk.
			 */
693 if (bh_end <= pos || bh_pos >= end) {
694 if (!buffer_uptodate(bh)) {
695 zero_user(page, bh_offset(bh),
696 blocksize);
697 set_buffer_uptodate(bh);
698 }
699 mark_buffer_dirty(bh);
700 continue;
701 }
702 set_buffer_new(bh);
703 if (!buffer_uptodate(bh) &&
704 (bh_pos < pos || bh_end > end)) {
705 u8 *kaddr;
706 unsigned pofs;
707
708 kaddr = kmap_atomic(page);
709 if (bh_pos < pos) {
710 pofs = bh_pos & ~PAGE_CACHE_MASK;
711 memset(kaddr + pofs, 0, pos - bh_pos);
712 }
713 if (bh_end > end) {
714 pofs = end & ~PAGE_CACHE_MASK;
715 memset(kaddr + pofs, 0, bh_end - end);
716 }
717 kunmap_atomic(kaddr);
718 flush_dcache_page(page);
719 }
720 continue;
721 }
		/*
		 * The run containing this buffer is not cached.  If the
		 * buffer lies beyond the allocated size it cannot have any
		 * clusters backing it, so just make sure it is zeroed and
		 * uptodate.
		 */
727 read_lock_irqsave(&ni->size_lock, flags);
728 initialized_size = ni->allocated_size;
729 read_unlock_irqrestore(&ni->size_lock, flags);
730 if (bh_pos > initialized_size) {
731 if (PageUptodate(page)) {
732 if (!buffer_uptodate(bh))
733 set_buffer_uptodate(bh);
734 } else if (!buffer_uptodate(bh)) {
735 zero_user(page, bh_offset(bh), blocksize);
736 set_buffer_uptodate(bh);
737 }
738 continue;
739 }
740 is_retry = false;
741 if (!rl) {
742 down_read(&ni->runlist.lock);
743retry_remap:
744 rl = ni->runlist.rl;
745 }
746 if (likely(rl != NULL)) {
747
748 while (rl->length && rl[1].vcn <= bh_cpos)
749 rl++;
750 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
751 if (likely(lcn >= 0)) {
752
753
754
755
756 was_hole = false;
757 vcn = bh_cpos;
758 vcn_len = rl[1].vcn - vcn;
759 lcn_block = lcn << (vol->cluster_size_bits -
760 blocksize_bits);
761 cdelta = 0;
				/*
				 * If the cached run extends to (or past) the
				 * last cluster of the write, the runlist lock
				 * is no longer needed, so drop it now.
				 */
769 if (likely(vcn + vcn_len >= cend)) {
770 if (rl_write_locked) {
771 up_write(&ni->runlist.lock);
772 rl_write_locked = false;
773 } else
774 up_read(&ni->runlist.lock);
775 rl = NULL;
776 }
777 goto map_buffer_cached;
778 }
779 } else
780 lcn = LCN_RL_NOT_MAPPED;
781
782
783
784
785 if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
786 if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
787
788 if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing to map it, so drop the read
					 * lock, take the write lock and retry
					 * in case the runlist changed whilst
					 * the lock was dropped.
					 */
796 up_read(&ni->runlist.lock);
797 down_write(&ni->runlist.lock);
798 rl_write_locked = true;
799 goto retry_remap;
800 }
801 err = ntfs_map_runlist_nolock(ni, bh_cpos,
802 NULL);
803 if (likely(!err)) {
804 is_retry = true;
805 goto retry_remap;
806 }
807
808
809
810
811
812 if (err == -ENOENT) {
813 lcn = LCN_ENOENT;
814 err = 0;
815 goto rl_not_mapped_enoent;
816 }
817 } else
818 err = -EIO;
819
820 bh->b_blocknr = -1;
821 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
822 "attribute type 0x%x, vcn 0x%llx, "
823 "vcn offset 0x%x, because its "
824 "location on disk could not be "
825 "determined%s (error code %i).",
826 ni->mft_no, ni->type,
827 (unsigned long long)bh_cpos,
828 (unsigned)bh_pos &
829 vol->cluster_size_mask,
830 is_retry ? " even after retrying" : "",
831 err);
832 break;
833 }
834rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or beyond the end of the runlist.
		 * We need to fill the hole, unless the buffer is in a cluster
		 * that is not touched by the write at all, in which case it
		 * is left unmapped.  (This can only happen when the cluster
		 * size is less than the page cache size.)
		 */
842 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
843 bh_cend = (bh_end + vol->cluster_size - 1) >>
844 vol->cluster_size_bits;
845 if ((bh_cend <= cpos || bh_cpos >= cend)) {
846 bh->b_blocknr = -1;
847
848
849
850
851
852
853
854
855 if (PageUptodate(page)) {
856 if (!buffer_uptodate(bh))
857 set_buffer_uptodate(bh);
858 } else if (!buffer_uptodate(bh)) {
859 zero_user(page, bh_offset(bh),
860 blocksize);
861 set_buffer_uptodate(bh);
862 }
863 continue;
864 }
865 }
866
867
868
869
870 BUG_ON(lcn != LCN_HOLE);
		/*
		 * The buffer is in a hole that needs to be filled with a
		 * newly allocated cluster; that requires the runlist to be
		 * locked for writing.
		 */
876 BUG_ON(!rl);
877 if (!rl_write_locked) {
878 up_read(&ni->runlist.lock);
879 down_write(&ni->runlist.lock);
880 rl_write_locked = true;
881 goto retry_remap;
882 }
883
884 BUG_ON(rl->lcn != LCN_HOLE);
885 lcn = -1;
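		/*
		 * Scan back through the runlist for the last allocated
		 * cluster; its lcn is used as the allocation hint below.
		 */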
886 rl2 = rl;
887 while (--rl2 >= ni->runlist.rl) {
888 if (rl2->lcn >= 0) {
889 lcn = rl2->lcn + rl2->length;
890 break;
891 }
892 }
893 rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
894 false);
895 if (IS_ERR(rl2)) {
896 err = PTR_ERR(rl2);
897 ntfs_debug("Failed to allocate cluster, error code %i.",
898 err);
899 break;
900 }
901 lcn = rl2->lcn;
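		/* Merge the newly allocated run into the inode's runlist. */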
902 rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
903 if (IS_ERR(rl)) {
904 err = PTR_ERR(rl);
905 if (err != -ENOMEM)
906 err = -EIO;
907 if (ntfs_cluster_free_from_rl(vol, rl2)) {
908 ntfs_error(vol->sb, "Failed to release "
909 "allocated cluster in error "
910 "code path. Run chkdsk to "
911 "recover the lost cluster.");
912 NVolSetErrors(vol);
913 }
914 ntfs_free(rl2);
915 break;
916 }
917 ni->runlist.rl = rl;
918 status.runlist_merged = 1;
919 ntfs_debug("Allocated cluster, lcn 0x%llx.",
920 (unsigned long long)lcn);
921
922 if (!NInoAttr(ni))
923 base_ni = ni;
924 else
925 base_ni = ni->ext.base_ntfs_ino;
926 m = map_mft_record(base_ni);
927 if (IS_ERR(m)) {
928 err = PTR_ERR(m);
929 break;
930 }
931 ctx = ntfs_attr_get_search_ctx(base_ni, m);
932 if (unlikely(!ctx)) {
933 err = -ENOMEM;
934 unmap_mft_record(base_ni);
935 break;
936 }
937 status.mft_attr_mapped = 1;
938 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
939 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
940 if (unlikely(err)) {
941 if (err == -ENOENT)
942 err = -EIO;
943 break;
944 }
945 m = ctx->mrec;
946 a = ctx->attr;
947
948
949
950
951
952
953
954 vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
955 rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
956 BUG_ON(!rl2);
957 BUG_ON(!rl2->length);
958 BUG_ON(rl2->lcn < LCN_HOLE);
959 highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If the highest_vcn in the attribute record is zero,
		 * calculate the real highest vcn from the allocated size (it
		 * can genuinely be zero for a single cluster extent).
		 */
964 if (!highest_vcn)
965 highest_vcn = (sle64_to_cpu(
966 a->data.non_resident.allocated_size) >>
967 vol->cluster_size_bits) - 1;
968
969
970
971
972 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
973 highest_vcn);
974 if (unlikely(mp_size <= 0)) {
975 if (!(err = mp_size))
976 err = -EIO;
977 ntfs_debug("Failed to get size for mapping pairs "
978 "array, error code %i.", err);
979 break;
980 }
		/*
		 * Save the current attribute record length for error
		 * recovery, then resize the record to fit the bigger mapping
		 * pairs array.
		 */
985 attr_rec_len = le32_to_cpu(a->length);
986 err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
987 a->data.non_resident.mapping_pairs_offset));
988 if (unlikely(err)) {
989 BUG_ON(err != -ENOSPC);
			/*
			 * There is not enough space in the mft record to fit
			 * the larger mapping pairs array.  Making space, e.g.
			 * by moving the attribute to another mft record, is
			 * not implemented yet, so just give up.
			 */
1002 ntfs_error(vol->sb, "Not enough space in the mft "
1003 "record for the extended attribute "
1004 "record. This case is not "
1005 "implemented yet.");
1006 err = -EOPNOTSUPP;
1007 break ;
1008 }
1009 status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the
		 * attribute record.
		 */
1014 err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
1015 a->data.non_resident.mapping_pairs_offset),
1016 mp_size, rl2, vcn, highest_vcn, NULL);
1017 if (unlikely(err)) {
1018 ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
1019 "attribute type 0x%x, because building "
1020 "the mapping pairs failed with error "
1021 "code %i.", vi->i_ino,
1022 (unsigned)le32_to_cpu(ni->type), err);
1023 err = -EIO;
1024 break;
1025 }
1026
1027 if (unlikely(!a->data.non_resident.highest_vcn))
1028 a->data.non_resident.highest_vcn =
1029 cpu_to_sle64(highest_vcn);
		/*
		 * For sparse and compressed attributes the compressed size
		 * also grows by one cluster; update it in the ntfs inode and
		 * in the attribute extent that carries it.
		 */
1034 if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
1035
1036
1037
1038
1039
1040 if (a->data.non_resident.lowest_vcn) {
1041 flush_dcache_mft_record_page(ctx->ntfs_ino);
1042 mark_mft_record_dirty(ctx->ntfs_ino);
1043 ntfs_attr_reinit_search_ctx(ctx);
1044 err = ntfs_attr_lookup(ni->type, ni->name,
1045 ni->name_len, CASE_SENSITIVE,
1046 0, NULL, 0, ctx);
1047 if (unlikely(err)) {
1048 status.attr_switched = 1;
1049 break;
1050 }
1051
1052 a = ctx->attr;
1053 }
1054 write_lock_irqsave(&ni->size_lock, flags);
1055 ni->itype.compressed.size += vol->cluster_size;
1056 a->data.non_resident.compressed_size =
1057 cpu_to_sle64(ni->itype.compressed.size);
1058 write_unlock_irqrestore(&ni->size_lock, flags);
1059 }
1060
1061 flush_dcache_mft_record_page(ctx->ntfs_ino);
1062 mark_mft_record_dirty(ctx->ntfs_ino);
1063 ntfs_attr_put_search_ctx(ctx);
1064 unmap_mft_record(base_ni);
1065
1066 status.runlist_merged = 0;
1067 status.mft_attr_mapped = 0;
1068 status.mp_rebuilt = 0;
1069
1070 was_hole = true;
1071 vcn = bh_cpos;
1072 vcn_len = 1;
1073 lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
1074 cdelta = 0;
1075
1076
1077
1078
1079
1080 if (likely(vcn + vcn_len >= cend)) {
1081 up_write(&ni->runlist.lock);
1082 rl_write_locked = false;
1083 rl = NULL;
1084 }
1085 goto map_buffer_cached;
1086 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
1087
1088 if (likely(!err && ++u < nr_pages))
1089 goto do_next_page;
1090
1091 if (likely(!err)) {
1092 if (unlikely(rl_write_locked)) {
1093 up_write(&ni->runlist.lock);
1094 rl_write_locked = false;
1095 } else if (unlikely(rl))
1096 up_read(&ni->runlist.lock);
1097 rl = NULL;
1098 }
1099
1100 read_lock_irqsave(&ni->size_lock, flags);
1101 initialized_size = ni->initialized_size;
1102 read_unlock_irqrestore(&ni->size_lock, flags);
1103 while (wait_bh > wait) {
1104 bh = *--wait_bh;
1105 wait_on_buffer(bh);
1106 if (likely(buffer_uptodate(bh))) {
1107 page = bh->b_page;
1108 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
1109 bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, zero
			 * the region beyond the initialized size so no stale
			 * data is exposed.
			 */
1114 if (unlikely(bh_pos + blocksize > initialized_size)) {
1115 int ofs = 0;
1116
1117 if (likely(bh_pos < initialized_size))
1118 ofs = initialized_size - bh_pos;
1119 zero_user_segment(page, bh_offset(bh) + ofs,
1120 blocksize);
1121 }
1122 } else
1123 err = -EIO;
1124 }
1125 if (likely(!err)) {
1126
1127 u = 0;
1128 do {
1129 bh = head = page_buffers(pages[u]);
1130 do {
1131 if (buffer_new(bh))
1132 clear_buffer_new(bh);
1133 } while ((bh = bh->b_this_page) != head);
1134 } while (++u < nr_pages);
1135 ntfs_debug("Done.");
1136 return err;
1137 }
1138 if (status.attr_switched) {
1139
1140 ntfs_attr_reinit_search_ctx(ctx);
1141 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1142 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
1143 ntfs_error(vol->sb, "Failed to find required "
1144 "attribute extent of attribute in "
1145 "error code path. Run chkdsk to "
1146 "recover.");
1147 write_lock_irqsave(&ni->size_lock, flags);
1148 ni->itype.compressed.size += vol->cluster_size;
1149 write_unlock_irqrestore(&ni->size_lock, flags);
1150 flush_dcache_mft_record_page(ctx->ntfs_ino);
1151 mark_mft_record_dirty(ctx->ntfs_ino);
1152
1153
1154
1155
1156
1157 NVolSetErrors(vol);
1158 } else {
1159 m = ctx->mrec;
1160 a = ctx->attr;
1161 status.attr_switched = 0;
1162 }
1163 }
	/*
	 * Error recovery: if the newly allocated cluster was merged into the
	 * runlist, undo the merge by punching the cluster back out of the
	 * runlist and releasing it in the cluster bitmap.
	 */
1171 if (status.runlist_merged && !status.attr_switched) {
1172 BUG_ON(!rl_write_locked);
1173
1174 if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
1175 ntfs_error(vol->sb, "Failed to punch hole into "
1176 "attribute runlist in error code "
1177 "path. Run chkdsk to recover the "
1178 "lost cluster.");
1179 NVolSetErrors(vol);
1180 } else {
1181 status.runlist_merged = 0;
1182
1183
1184
1185
1186
1187 down_write(&vol->lcnbmp_lock);
1188 if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
1189 ntfs_error(vol->sb, "Failed to release "
1190 "allocated cluster in error "
1191 "code path. Run chkdsk to "
1192 "recover the lost cluster.");
1193 NVolSetErrors(vol);
1194 }
1195 up_write(&vol->lcnbmp_lock);
1196 }
1197 }
	/*
	 * If the mapping pairs array was rebuilt but the runlist change has
	 * been undone, restore the attribute record to its old size and
	 * regenerate the old mapping pairs array from the runlist.
	 */
1204 if (status.mp_rebuilt && !status.runlist_merged) {
1205 if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
1206 ntfs_error(vol->sb, "Failed to restore attribute "
1207 "record in error code path. Run "
1208 "chkdsk to recover.");
1209 NVolSetErrors(vol);
1210 } else {
1211 if (ntfs_mapping_pairs_build(vol, (u8*)a +
1212 le16_to_cpu(a->data.non_resident.
1213 mapping_pairs_offset), attr_rec_len -
1214 le16_to_cpu(a->data.non_resident.
1215 mapping_pairs_offset), ni->runlist.rl,
1216 vcn, highest_vcn, NULL)) {
1217 ntfs_error(vol->sb, "Failed to restore "
1218 "mapping pairs array in error "
1219 "code path. Run chkdsk to "
1220 "recover.");
1221 NVolSetErrors(vol);
1222 }
1223 flush_dcache_mft_record_page(ctx->ntfs_ino);
1224 mark_mft_record_dirty(ctx->ntfs_ino);
1225 }
1226 }
1227
1228 if (status.mft_attr_mapped) {
1229 ntfs_attr_put_search_ctx(ctx);
1230 unmap_mft_record(base_ni);
1231 }
1232
1233 if (rl_write_locked)
1234 up_write(&ni->runlist.lock);
1235 else if (rl)
1236 up_read(&ni->runlist.lock);
	/*
	 * Finally, clear buffer_new on all the buffers that were processed,
	 * zeroing and dirtying any that were left only partially set up so
	 * that no stale data can be exposed.
	 */
1242 nr_pages = u;
1243 u = 0;
1244 end = bh_cpos << vol->cluster_size_bits;
1245 do {
1246 page = pages[u];
1247 bh = head = page_buffers(page);
1248 do {
1249 if (u == nr_pages &&
1250 ((s64)page->index << PAGE_CACHE_SHIFT) +
1251 bh_offset(bh) >= end)
1252 break;
1253 if (!buffer_new(bh))
1254 continue;
1255 clear_buffer_new(bh);
1256 if (!buffer_uptodate(bh)) {
1257 if (PageUptodate(page))
1258 set_buffer_uptodate(bh);
1259 else {
1260 zero_user(page, bh_offset(bh),
1261 blocksize);
1262 set_buffer_uptodate(bh);
1263 }
1264 }
1265 mark_buffer_dirty(bh);
1266 } while ((bh = bh->b_this_page) != head);
1267 } while (++u <= nr_pages);
1268 ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
1269 return err;
1270}

/*
 * ntfs_copy_from_user - copy data from user space into locked pages
 *
 * Copy @bytes of data from the user buffer @buf into the locked destination
 * pages @pages, starting at offset @ofs into the first page.  An atomic kmap
 * copy is attempted first; if it faults, a sleeping kmap()/__copy_from_user()
 * retry is made.  If that also faults, the remaining destination is zeroed so
 * no uninitialized data is exposed, and the number of bytes actually copied
 * is returned.
 */
1277static inline size_t ntfs_copy_from_user(struct page **pages,
1278 unsigned nr_pages, unsigned ofs, const char __user *buf,
1279 size_t bytes)
1280{
1281 struct page **last_page = pages + nr_pages;
1282 char *addr;
1283 size_t total = 0;
1284 unsigned len;
1285 int left;
1286
1287 do {
1288 len = PAGE_CACHE_SIZE - ofs;
1289 if (len > bytes)
1290 len = bytes;
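		/*
		 * Try the copy with an atomic kmap first; if the user buffer
		 * faults, fall back to the sleeping kmap() path below.
		 */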
1291 addr = kmap_atomic(*pages);
1292 left = __copy_from_user_inatomic(addr + ofs, buf, len);
1293 kunmap_atomic(addr);
1294 if (unlikely(left)) {
1295
1296 addr = kmap(*pages);
1297 left = __copy_from_user(addr + ofs, buf, len);
1298 kunmap(*pages);
1299 if (unlikely(left))
1300 goto err_out;
1301 }
1302 total += len;
1303 bytes -= len;
1304 if (!bytes)
1305 break;
1306 buf += len;
1307 ofs = 0;
1308 } while (++pages < last_page);
1309out:
1310 return total;
1311err_out:
1312 total += len - left;
1313
1314 while (++pages < last_page) {
1315 bytes -= len;
1316 if (!bytes)
1317 break;
1318 len = PAGE_CACHE_SIZE;
1319 if (len > bytes)
1320 len = bytes;
1321 zero_user(*pages, 0, len);
1322 }
1323 goto out;
1324}
1325
1326static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
1327 const struct iovec *iov, size_t iov_ofs, size_t bytes)
1328{
1329 size_t total = 0;
1330
1331 while (1) {
1332 const char __user *buf = iov->iov_base + iov_ofs;
1333 unsigned len;
1334 size_t left;
1335
1336 len = iov->iov_len - iov_ofs;
1337 if (len > bytes)
1338 len = bytes;
1339 left = __copy_from_user_inatomic(vaddr, buf, len);
1340 total += len;
1341 bytes -= len;
1342 vaddr += len;
1343 if (unlikely(left)) {
1344 total -= left;
1345 break;
1346 }
1347 if (!bytes)
1348 break;
1349 iov++;
1350 iov_ofs = 0;
1351 }
1352 return total;
1353}
1354
1355static inline void ntfs_set_next_iovec(const struct iovec **iovp,
1356 size_t *iov_ofsp, size_t bytes)
1357{
1358 const struct iovec *iov = *iovp;
1359 size_t iov_ofs = *iov_ofsp;
1360
1361 while (bytes) {
1362 unsigned len;
1363
1364 len = iov->iov_len - iov_ofs;
1365 if (len > bytes)
1366 len = bytes;
1367 bytes -= len;
1368 iov_ofs += len;
1369 if (iov->iov_len == iov_ofs) {
1370 iov++;
1371 iov_ofs = 0;
1372 }
1373 }
1374 *iovp = iov;
1375 *iov_ofsp = iov_ofs;
1376}

/*
 * ntfs_copy_from_user_iovec - copy iovec data from user space into locked pages
 *
 * Like ntfs_copy_from_user() but the source is an array of iovecs.  *@iov and
 * *@iov_ofs are advanced past the data that was consumed.  On an
 * unrecoverable fault the remainder of the destination is zeroed and the
 * number of bytes actually copied is returned.
 */
1393static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1394 unsigned nr_pages, unsigned ofs, const struct iovec **iov,
1395 size_t *iov_ofs, size_t bytes)
1396{
1397 struct page **last_page = pages + nr_pages;
1398 char *addr;
1399 size_t copied, len, total = 0;
1400
1401 do {
1402 len = PAGE_CACHE_SIZE - ofs;
1403 if (len > bytes)
1404 len = bytes;
1405 addr = kmap_atomic(*pages);
1406 copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
1407 *iov, *iov_ofs, len);
1408 kunmap_atomic(addr);
1409 if (unlikely(copied != len)) {
1410
1411 addr = kmap(*pages);
1412 copied = __ntfs_copy_from_user_iovec_inatomic(addr +
1413 ofs, *iov, *iov_ofs, len);
1414 if (unlikely(copied != len))
1415 goto err_out;
1416 kunmap(*pages);
1417 }
1418 total += len;
1419 ntfs_set_next_iovec(iov, iov_ofs, len);
1420 bytes -= len;
1421 if (!bytes)
1422 break;
1423 ofs = 0;
1424 } while (++pages < last_page);
1425out:
1426 return total;
1427err_out:
1428 BUG_ON(copied > len);
1429
1430 memset(addr + ofs + copied, 0, len - copied);
1431 kunmap(*pages);
1432 total += copied;
1433 ntfs_set_next_iovec(iov, iov_ofs, copied);
1434 while (++pages < last_page) {
1435 bytes -= len;
1436 if (!bytes)
1437 break;
1438 len = PAGE_CACHE_SIZE;
1439 if (len > bytes)
1440 len = bytes;
1441 zero_user(*pages, 0, len);
1442 }
1443 goto out;
1444}
1445
1446static inline void ntfs_flush_dcache_pages(struct page **pages,
1447 unsigned nr_pages)
1448{
1449 BUG_ON(!nr_pages);
	/*
	 * Do not do the decrement inside the call to flush_dcache_page()
	 * itself: on architectures where flush_dcache_page() is a no-op macro
	 * the argument would not be evaluated and the loop would never
	 * terminate.
	 */
1455 do {
1456 --nr_pages;
1457 flush_dcache_page(pages[nr_pages]);
1458 } while (nr_pages > 0);
1459}

/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
1470static inline int ntfs_commit_pages_after_non_resident_write(
1471 struct page **pages, const unsigned nr_pages,
1472 s64 pos, size_t bytes)
1473{
1474 s64 end, initialized_size;
1475 struct inode *vi;
1476 ntfs_inode *ni, *base_ni;
1477 struct buffer_head *bh, *head;
1478 ntfs_attr_search_ctx *ctx;
1479 MFT_RECORD *m;
1480 ATTR_RECORD *a;
1481 unsigned long flags;
1482 unsigned blocksize, u;
1483 int err;
1484
1485 vi = pages[0]->mapping->host;
1486 ni = NTFS_I(vi);
1487 blocksize = vi->i_sb->s_blocksize;
1488 end = pos + bytes;
1489 u = 0;
1490 do {
1491 s64 bh_pos;
1492 struct page *page;
1493 bool partial;
1494
1495 page = pages[u];
1496 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
1497 bh = head = page_buffers(page);
1498 partial = false;
1499 do {
1500 s64 bh_end;
1501
1502 bh_end = bh_pos + blocksize;
1503 if (bh_end <= pos || bh_pos >= end) {
1504 if (!buffer_uptodate(bh))
1505 partial = true;
1506 } else {
1507 set_buffer_uptodate(bh);
1508 mark_buffer_dirty(bh);
1509 }
1510 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all the buffers in this page are now uptodate, the page
		 * itself can be marked uptodate as well.
		 */
1515 if (!partial && !PageUptodate(page))
1516 SetPageUptodate(page);
1517 } while (++u < nr_pages);
	/*
	 * If the write does not go past the initialized size, there is
	 * nothing else to update and we are done.
	 */
1522 read_lock_irqsave(&ni->size_lock, flags);
1523 initialized_size = ni->initialized_size;
1524 read_unlock_irqrestore(&ni->size_lock, flags);
1525 if (end <= initialized_size) {
1526 ntfs_debug("Done.");
1527 return 0;
1528 }
	/*
	 * The write went past the initialized size: update the initialized
	 * size (and possibly i_size) both in the ntfs inode and in the
	 * attribute record.
	 */
1533 if (!NInoAttr(ni))
1534 base_ni = ni;
1535 else
1536 base_ni = ni->ext.base_ntfs_ino;
1537
1538 m = map_mft_record(base_ni);
1539 if (IS_ERR(m)) {
1540 err = PTR_ERR(m);
1541 m = NULL;
1542 ctx = NULL;
1543 goto err_out;
1544 }
1545 BUG_ON(!NInoNonResident(ni));
1546 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1547 if (unlikely(!ctx)) {
1548 err = -ENOMEM;
1549 goto err_out;
1550 }
1551 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1552 CASE_SENSITIVE, 0, NULL, 0, ctx);
1553 if (unlikely(err)) {
1554 if (err == -ENOENT)
1555 err = -EIO;
1556 goto err_out;
1557 }
1558 a = ctx->attr;
1559 BUG_ON(!a->non_resident);
1560 write_lock_irqsave(&ni->size_lock, flags);
1561 BUG_ON(end > ni->allocated_size);
1562 ni->initialized_size = end;
1563 a->data.non_resident.initialized_size = cpu_to_sle64(end);
1564 if (end > i_size_read(vi)) {
1565 i_size_write(vi, end);
1566 a->data.non_resident.data_size =
1567 a->data.non_resident.initialized_size;
1568 }
1569 write_unlock_irqrestore(&ni->size_lock, flags);
1570
1571 flush_dcache_mft_record_page(ctx->ntfs_ino);
1572 mark_mft_record_dirty(ctx->ntfs_ino);
1573 ntfs_attr_put_search_ctx(ctx);
1574 unmap_mft_record(base_ni);
1575 ntfs_debug("Done.");
1576 return 0;
1577err_out:
1578 if (ctx)
1579 ntfs_attr_put_search_ctx(ctx);
1580 if (m)
1581 unmap_mft_record(base_ni);
1582 ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
1583 "code %i).", err);
1584 if (err != -ENOMEM)
1585 NVolSetErrors(ni->vol);
1586 return err;
1587}

/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the
 * inode (@pages[0]->mapping->host).  The pages are locked and contain the
 * data copied in from user space.
 *
 * Non-resident attributes are handed off to
 * ntfs_commit_pages_after_non_resident_write().
 *
 * For resident attributes there is only a single page and the data is copied
 * from it directly into the attribute value in the mft record; the attribute
 * and inode sizes are updated if the write extends them and, if the page was
 * not uptodate, the parts of it outside the write are filled in from the
 * attribute value so the page can be marked uptodate.
 *
 * Return 0 on success and -errno on error.
 */
1625static int ntfs_commit_pages_after_write(struct page **pages,
1626 const unsigned nr_pages, s64 pos, size_t bytes)
1627{
1628 s64 end, initialized_size;
1629 loff_t i_size;
1630 struct inode *vi;
1631 ntfs_inode *ni, *base_ni;
1632 struct page *page;
1633 ntfs_attr_search_ctx *ctx;
1634 MFT_RECORD *m;
1635 ATTR_RECORD *a;
1636 char *kattr, *kaddr;
1637 unsigned long flags;
1638 u32 attr_len;
1639 int err;
1640
1641 BUG_ON(!nr_pages);
1642 BUG_ON(!pages);
1643 page = pages[0];
1644 BUG_ON(!page);
1645 vi = page->mapping->host;
1646 ni = NTFS_I(vi);
1647 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
1648 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
1649 vi->i_ino, ni->type, page->index, nr_pages,
1650 (long long)pos, bytes);
1651 if (NInoNonResident(ni))
1652 return ntfs_commit_pages_after_non_resident_write(pages,
1653 nr_pages, pos, bytes);
1654 BUG_ON(nr_pages > 1);
1655
1656
1657
1658
1659 if (!NInoAttr(ni))
1660 base_ni = ni;
1661 else
1662 base_ni = ni->ext.base_ntfs_ino;
1663 BUG_ON(NInoNonResident(ni));
1664
1665 m = map_mft_record(base_ni);
1666 if (IS_ERR(m)) {
1667 err = PTR_ERR(m);
1668 m = NULL;
1669 ctx = NULL;
1670 goto err_out;
1671 }
1672 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1673 if (unlikely(!ctx)) {
1674 err = -ENOMEM;
1675 goto err_out;
1676 }
1677 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1678 CASE_SENSITIVE, 0, NULL, 0, ctx);
1679 if (unlikely(err)) {
1680 if (err == -ENOENT)
1681 err = -EIO;
1682 goto err_out;
1683 }
1684 a = ctx->attr;
1685 BUG_ON(a->non_resident);
1686
1687 attr_len = le32_to_cpu(a->data.resident.value_length);
1688 i_size = i_size_read(vi);
1689 BUG_ON(attr_len != i_size);
1690 BUG_ON(pos > attr_len);
1691 end = pos + bytes;
1692 BUG_ON(end > le32_to_cpu(a->length) -
1693 le16_to_cpu(a->data.resident.value_offset));
1694 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
1695 kaddr = kmap_atomic(page);
1696
1697 memcpy(kattr + pos, kaddr + pos, bytes);
1698
1699 if (end > attr_len) {
1700 attr_len = end;
1701 a->data.resident.value_length = cpu_to_le32(attr_len);
1702 }
	/*
	 * If the page is not uptodate, bring the regions outside the write
	 * uptodate by copying them from the attribute value, zero the rest of
	 * the page and then mark the page uptodate.
	 */
1707 if (!PageUptodate(page)) {
1708 if (pos > 0)
1709 memcpy(kaddr, kattr, pos);
1710 if (end < attr_len)
1711 memcpy(kaddr + end, kattr + end, attr_len - end);
1712
1713 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1714 flush_dcache_page(page);
1715 SetPageUptodate(page);
1716 }
1717 kunmap_atomic(kaddr);
1718
1719 read_lock_irqsave(&ni->size_lock, flags);
1720 initialized_size = ni->initialized_size;
1721 BUG_ON(end > ni->allocated_size);
1722 read_unlock_irqrestore(&ni->size_lock, flags);
1723 BUG_ON(initialized_size != i_size);
1724 if (end > initialized_size) {
1725 write_lock_irqsave(&ni->size_lock, flags);
1726 ni->initialized_size = end;
1727 i_size_write(vi, end);
1728 write_unlock_irqrestore(&ni->size_lock, flags);
1729 }
1730
1731 flush_dcache_mft_record_page(ctx->ntfs_ino);
1732 mark_mft_record_dirty(ctx->ntfs_ino);
1733 ntfs_attr_put_search_ctx(ctx);
1734 unmap_mft_record(base_ni);
1735 ntfs_debug("Done.");
1736 return 0;
1737err_out:
1738 if (err == -ENOMEM) {
1739 ntfs_warning(vi->i_sb, "Error allocating memory required to "
1740 "commit the write.");
1741 if (PageUptodate(page)) {
1742 ntfs_warning(vi->i_sb, "Page is uptodate, setting "
1743 "dirty so the write will be retried "
1744 "later on by the VM.");
1745
1746
1747
1748
1749 __set_page_dirty_nobuffers(page);
1750 err = 0;
1751 } else
1752 ntfs_error(vi->i_sb, "Page is not uptodate. Written "
1753 "data has been lost.");
1754 } else {
1755 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
1756 "with error %i.", err);
1757 NVolSetErrors(ni->vol);
1758 }
1759 if (ctx)
1760 ntfs_attr_put_search_ctx(ctx);
1761 if (m)
1762 unmap_mft_record(base_ni);
1763 return err;
1764}

static void ntfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		ntfs_truncate_vfs(inode);
	}
}

/*
 * ntfs_file_buffered_write - write data to a file via the page cache
 *
 * Locking: i_mutex is held on the inode (taken by ntfs_file_aio_write()).
 */
1781static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
1782 const struct iovec *iov, unsigned long nr_segs,
1783 loff_t pos, loff_t *ppos, size_t count)
1784{
1785 struct file *file = iocb->ki_filp;
1786 struct address_space *mapping = file->f_mapping;
1787 struct inode *vi = mapping->host;
1788 ntfs_inode *ni = NTFS_I(vi);
1789 ntfs_volume *vol = ni->vol;
1790 struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
1791 struct page *cached_page = NULL;
1792 char __user *buf = NULL;
1793 s64 end, ll;
1794 VCN last_vcn;
1795 LCN lcn;
1796 unsigned long flags;
1797 size_t bytes, iov_ofs = 0;
1798 ssize_t status, written;
1799 unsigned nr_pages;
1800 int err;
1801
1802 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1803 "pos 0x%llx, count 0x%lx.",
1804 vi->i_ino, (unsigned)le32_to_cpu(ni->type),
1805 (unsigned long long)pos, (unsigned long)count);
1806 if (unlikely(!count))
1807 return 0;
1808 BUG_ON(NInoMstProtected(ni));
	/*
	 * Writes to encrypted files are not supported and writes to
	 * compressed files are not implemented yet; refuse both here.
	 */
1815 if (ni->type != AT_INDEX_ALLOCATION) {
1816
1817 if (NInoEncrypted(ni)) {
1818
1819
1820
1821
1822
1823 ntfs_debug("Denying write access to encrypted file.");
1824 return -EACCES;
1825 }
1826 if (NInoCompressed(ni)) {
1827
1828 BUG_ON(ni->type != AT_DATA);
1829 BUG_ON(ni->name_len);
1830
1831
1832
1833
1834
1835
1836 ntfs_error(vi->i_sb, "Writing to compressed files is "
1837 "not implemented yet. Sorry.");
1838 return -EOPNOTSUPP;
1839 }
1840 }
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
1845 if (unlikely(NInoTruncateFailed(ni))) {
1846 inode_dio_wait(vi);
1847 err = ntfs_truncate(vi);
1848 if (err || NInoTruncateFailed(ni)) {
1849 if (!err)
1850 err = -EIO;
1851 ntfs_error(vol->sb, "Cannot perform write to inode "
1852 "0x%lx, attribute type 0x%x, because "
1853 "ntfs_truncate() failed (error code "
1854 "%i).", vi->i_ino,
1855 (unsigned)le32_to_cpu(ni->type), err);
1856 return err;
1857 }
1858 }
1859
1860 end = pos + count;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
1865 read_lock_irqsave(&ni->size_lock, flags);
1866 ll = ni->allocated_size;
1867 read_unlock_irqrestore(&ni->size_lock, flags);
1868 if (end > ll) {
1869
1870 ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
1871 if (likely(ll >= 0)) {
1872 BUG_ON(pos >= ll);
1873
1874 if (end > ll) {
1875 ntfs_debug("Truncating write to inode 0x%lx, "
1876 "attribute type 0x%x, because "
1877 "the allocation was only "
1878 "partially extended.",
1879 vi->i_ino, (unsigned)
1880 le32_to_cpu(ni->type));
1881 end = ll;
1882 count = ll - pos;
1883 }
1884 } else {
1885 err = ll;
1886 read_lock_irqsave(&ni->size_lock, flags);
1887 ll = ni->allocated_size;
1888 read_unlock_irqrestore(&ni->size_lock, flags);
1889
1890 if (pos < ll) {
1891 ntfs_debug("Truncating write to inode 0x%lx, "
1892 "attribute type 0x%x, because "
1893 "extending the allocation "
1894 "failed (error code %i).",
1895 vi->i_ino, (unsigned)
1896 le32_to_cpu(ni->type), err);
1897 end = ll;
1898 count = ll - pos;
1899 } else {
1900 ntfs_error(vol->sb, "Cannot perform write to "
1901 "inode 0x%lx, attribute type "
1902 "0x%x, because extending the "
1903 "allocation failed (error "
1904 "code %i).", vi->i_ino,
1905 (unsigned)
1906 le32_to_cpu(ni->type), err);
1907 return err;
1908 }
1909 }
1910 }
1911 written = 0;
	/*
	 * If the write starts beyond the initialized size, extend it up to
	 * the beginning of the write and initialize all non-sparse space
	 * between the old initialized size and the new one.  This
	 * automatically also increments the vfs inode->i_size to keep it
	 * above or equal to the initialized_size.
	 */
1919 read_lock_irqsave(&ni->size_lock, flags);
1920 ll = ni->initialized_size;
1921 read_unlock_irqrestore(&ni->size_lock, flags);
1922 if (pos > ll) {
1923 err = ntfs_attr_extend_initialized(ni, pos);
1924 if (err < 0) {
1925 ntfs_error(vol->sb, "Cannot perform write to inode "
1926 "0x%lx, attribute type 0x%x, because "
1927 "extending the initialized size "
1928 "failed (error code %i).", vi->i_ino,
1929 (unsigned)le32_to_cpu(ni->type), err);
1930 status = err;
1931 goto err_out;
1932 }
1933 }
	/*
	 * Determine the number of pages per cluster: when the cluster size is
	 * larger than the page cache size, a write that instantiates a hole
	 * must lock down all the pages of the cluster at once.
	 */
1938 nr_pages = 1;
1939 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
1940 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
1941
1942 last_vcn = -1;
1943 if (likely(nr_segs == 1))
1944 buf = iov->iov_base;
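	/*
	 * Main loop: each iteration grabs and prepares the destination pages,
	 * copies one chunk of user data into them, and commits the result.
	 */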
1945 do {
1946 VCN vcn;
1947 pgoff_t idx, start_idx;
1948 unsigned ofs, do_pages, u;
1949 size_t copied;
1950
1951 start_idx = idx = pos >> PAGE_CACHE_SHIFT;
1952 ofs = pos & ~PAGE_CACHE_MASK;
1953 bytes = PAGE_CACHE_SIZE - ofs;
1954 do_pages = 1;
1955 if (nr_pages > 1) {
1956 vcn = pos >> vol->cluster_size_bits;
1957 if (vcn != last_vcn) {
1958 last_vcn = vcn;
1959
1960
1961
1962
1963
1964 down_read(&ni->runlist.lock);
1965 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
1966 vol->cluster_size_bits, false);
1967 up_read(&ni->runlist.lock);
1968 if (unlikely(lcn < LCN_HOLE)) {
1969 status = -EIO;
1970 if (lcn == LCN_ENOMEM)
1971 status = -ENOMEM;
1972 else
1973 ntfs_error(vol->sb, "Cannot "
1974 "perform write to "
1975 "inode 0x%lx, "
1976 "attribute type 0x%x, "
1977 "because the attribute "
1978 "is corrupt.",
1979 vi->i_ino, (unsigned)
1980 le32_to_cpu(ni->type));
1981 break;
1982 }
1983 if (lcn == LCN_HOLE) {
1984 start_idx = (pos & ~(s64)
1985 vol->cluster_size_mask)
1986 >> PAGE_CACHE_SHIFT;
1987 bytes = vol->cluster_size - (pos &
1988 vol->cluster_size_mask);
1989 do_pages = nr_pages;
1990 }
1991 }
1992 }
1993 if (bytes > count)
1994 bytes = count;
		/*
		 * Fault in the user pages we are about to copy from before
		 * taking any page locks.  Copying from a page that is not
		 * faulted in while holding the destination page locked could
		 * otherwise deadlock in the page fault handler.
		 */
2003 if (likely(nr_segs == 1))
2004 ntfs_fault_in_pages_readable(buf, bytes);
2005 else
2006 ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
2007
2008 status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
2009 pages, &cached_page);
2010 if (unlikely(status))
2011 break;
		/*
		 * For non-resident attributes we need to fill any holes with
		 * actual clusters, ensure all buffers are mapped, and bring
		 * uptodate any buffers that are only partially being written
		 * to, before the user data is copied in.
		 */
2018 if (NInoNonResident(ni)) {
2019 status = ntfs_prepare_pages_for_non_resident_write(
2020 pages, do_pages, pos, bytes);
2021 if (unlikely(status)) {
2022 loff_t i_size;
2023
2024 do {
2025 unlock_page(pages[--do_pages]);
2026 page_cache_release(pages[do_pages]);
2027 } while (do_pages);
2028
2029
2030
2031
2032
2033
2034
2035 i_size = i_size_read(vi);
2036 if (pos + bytes > i_size) {
2037 ntfs_write_failed(mapping, pos + bytes);
2038 }
2039 break;
2040 }
2041 }
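		/*
		 * The page containing @pos may not be pages[0] when a whole
		 * cluster of pages was locked down to fill a hole; skip to it.
		 */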
2042 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
2043 if (likely(nr_segs == 1)) {
2044 copied = ntfs_copy_from_user(pages + u, do_pages - u,
2045 ofs, buf, bytes);
2046 buf += copied;
2047 } else
2048 copied = ntfs_copy_from_user_iovec(pages + u,
2049 do_pages - u, ofs, &iov, &iov_ofs,
2050 bytes);
2051 ntfs_flush_dcache_pages(pages + u, do_pages - u);
2052 status = ntfs_commit_pages_after_write(pages, do_pages, pos,
2053 bytes);
2054 if (likely(!status)) {
2055 written += copied;
2056 count -= copied;
2057 pos += copied;
2058 if (unlikely(copied != bytes))
2059 status = -EFAULT;
2060 }
2061 do {
2062 unlock_page(pages[--do_pages]);
2063 mark_page_accessed(pages[do_pages]);
2064 page_cache_release(pages[do_pages]);
2065 } while (do_pages);
2066 if (unlikely(status))
2067 break;
2068 balance_dirty_pages_ratelimited(mapping);
2069 cond_resched();
2070 } while (count);
2071err_out:
2072 *ppos = pos;
2073 if (cached_page)
2074 page_cache_release(cached_page);
2075 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
2076 written ? "written" : "status", (unsigned long)written,
2077 (long)status);
2078 return written ? written : status;
2079}

/*
 * ntfs_file_aio_write_nolock - do the write; the caller must already hold
 * i_mutex on the inode (see ntfs_file_aio_write()).
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	size_t count;
	ssize_t written, err;

	count = 0;
	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;
	pos = *ppos;

	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = file_remove_suid(file);
	if (err)
		goto out;
	err = file_update_time(file);
	if (err)
		goto out;
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * ntfs_file_aio_write - vfs ->aio_write() method for ntfs files
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

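	/* i_mutex serializes this write against other writes and truncates. */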
	mutex_lock(&inode->i_mutex);
	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
	if (ret > 0) {
		int err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

/**
 * ntfs_file_fsync - sync a file to disk
 * @filp:	file to be synced
 * @start:	start byte of the range to sync
 * @end:	end byte of the range to sync
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and
 * msync system calls.
 *
 * The dirty pages in the given range are flushed first.  If @datasync is
 * false, or the attribute is resident, the inode's mft record is written via
 * __ntfs_write_inode(); the inode is then written out with write_inode_now()
 * and finally the block device is synced so the metadata reaches the disk as
 * well.
 */
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct inode *vi = filp->f_mapping->host;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);

	err = filemap_write_and_wait_range(vi->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&vi->i_mutex);

	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * Sync the block device so that all the dirty metadata buffers reach
	 * the disk as well.
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
	mutex_unlock(&vi->i_mutex);
	return ret;
}

#endif /* NTFS_RW */

const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
#ifdef NTFS_RW
	.write		= do_sync_write,
	.aio_write	= ntfs_file_aio_write,
	.fsync		= ntfs_file_fsync,
#endif /* NTFS_RW */
	.mmap		= generic_file_mmap,
	.open		= ntfs_file_open,
	.splice_read	= generic_file_splice_read
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};