1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include "ubifs.h"
41#include <linux/mount.h>
42#include <linux/slab.h>
43#include <linux/migrate.h>
44
/*
 * read_block - read a single data block of an inode into a buffer.
 * @inode: inode the block belongs to
 * @addr: destination buffer, at least %UBIFS_BLOCK_SIZE bytes
 * @block: data block number within the inode
 * @dn: scratch buffer used to hold the on-flash data node
 *
 * Looks the data node up in the TNC, optionally decrypts it, decompresses it
 * into @addr and zero-pads the tail up to %UBIFS_BLOCK_SIZE.  Returns %0 on
 * success, %-ENOENT if the block is a hole (in which case @addr is zeroed),
 * and other negative error codes on failure.
 */
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * The decompressed data may be shorter than a full block (the last
	 * block of the file, or a partially-written block).  Zero the
	 * remainder so no stale buffer contents leak to the reader.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
	return -EINVAL;
}
98
/*
 * do_readpage - read a page worth of data from the media.
 * @page: locked, not-up-to-date page to read into
 *
 * Reads %UBIFS_BLOCKS_PER_PAGE data blocks into @page, zero-filling holes
 * and the region beyond @i_size, and marks the page up-to-date on success.
 * Pages that turn out to be entirely holes get the @PG_checked flag set,
 * which the budgeting code uses to tell "new page" from "existing page".
 * Returns %0 on success and a negative error code on failure.
 */
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(c, !PageChecked(page));
	ubifs_assert(c, !PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				/*
				 * Last block of the file: zero any on-media
				 * data that lies beyond @i_size.
				 */
				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		struct ubifs_info *c = inode->i_sb->s_fs_info;
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
188
189
190
191
192
193
194
195
196static void release_new_page_budget(struct ubifs_info *c)
197{
198 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
199
200 ubifs_release_budget(c, &req);
201}
202
203
204
205
206
207
208
209
210static void release_existing_page_budget(struct ubifs_info *c)
211{
212 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
213
214 ubifs_release_budget(c, &req);
215}
216
/*
 * write_begin_slow - slow path of the '->write_begin()' operation.
 * @mapping: address space the write targets
 * @pos: position in the file to start writing at
 * @len: number of bytes to be written
 * @pagep: the resulting locked page is returned here
 * @flags: AOP flags passed through from 'ubifs_write_begin()'
 *
 * Unlike the fast path in 'ubifs_write_begin()', this budgets *before*
 * locking the page, so the budgeting code is free to force write-back.
 * If the write appends to the file, this returns with @ui->ui_mutex held;
 * it is released in 'ubifs_write_end()'.  Returns %0 on success and a
 * negative error code on failure.
 */
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At this point we do not know whether the page is dirty or not, so
	 * we assume it is not and budget for a new page (the pessimistic
	 * case).  If it turns out below - via @PG_private / @PG_checked -
	 * that the page was already budgeted, the surplus budget is released
	 * or converted again.
	 */
	if (appending)
		/* @i_size will change, so the inode change must be budgeted */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		/* Whole-page write: no need to read the old contents */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the @PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So release the page budget allocated above.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * The page is not dirty and not @PG_checked, i.e. it
		 * corresponds to data which already exists on the media, so
		 * convert the "new page" budget taken above into the cheaper
		 * "dirtied page" budget.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * @i_size is about to change - take @ui->ui_mutex and keep it
		 * across the whole write operation (released in
		 * 'ubifs_write_end()').
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so the inode budget
			 * taken above may be freed.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
/*
 * allocate_budget - allocate budget for 'ubifs_write_begin()' (fast path).
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write extends @i_size
 *
 * Budgets with the @fast flag set, so 'ubifs_budget_space()' fails instead
 * of forcing write-back (which could deadlock while @page is locked).  If
 * @appending, this returns with @ui->ui_mutex held - it is released in
 * 'ubifs_write_end()' or by the caller's unroll path.  Returns %0 on
 * success or a negative error code (the caller asserts %-ENOSPC).
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty (already budgeted) and we are not
			 * appending, so no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty.  It already is, so no
			 * budget is needed.  @ui->ui_mutex is deliberately
			 * left locked to keep write-back from flushing the
			 * inode and freeing its budget underneath us.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * only the inode change needs to be budgeted.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * @PG_checked means the page did not exist on the
			 * media (hole / fresh page), so changing it adds new
			 * indexing information - budget for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Existing on-media data: the change adds no new
			 * indexing information, budget for a dirtied page.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but will have to be
				 * marked dirty because we are appending -
				 * budget for that too.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
/*
 * ubifs_write_begin - VFS '->write_begin()' callback.
 *
 * Fast path: lock the page first, then try to budget with the @fast flag
 * (see 'allocate_budget()'), which forbids the budgeting code from forcing
 * write-back - that would deadlock on the locked page.  If fast budgeting
 * fails, everything is unrolled and 'write_begin_slow()' is used, which
 * budgets before locking the page.  When appending, this function returns
 * with @ui->ui_mutex held; 'ubifs_write_end()' releases it.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int err, appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page so there is no need to
			 * load it.  We do not know whether this page exists
			 * on the media or not, so assume the latter because
			 * that requires the larger budget - better to budget
			 * a bit more than to read the page from the media.
			 * Hence the @PG_checked flag is set here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(c, err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request.  Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock.  Unlock and free
		 * everything and fall back to the slow path.
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back.  We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise.  This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;

}
509
510
511
512
513
514
515
516
517
518
519
/*
 * cancel_budget - cancel budget taken in '->write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page the budget was taken for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write was appending
 *
 * Helper for 'ubifs_write_end()' when less data was copied than budgeted
 * for.  Releases the inode budget (if the inode was clean) and drops
 * @ui->ui_mutex in the appending case, then releases the page budget
 * unless the page was already dirty (@PG_private set by an earlier write).
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
535
/*
 * ubifs_write_end - VFS '->write_end()' callback.
 *
 * Marks the page dirty and budget-tagged (@PG_private), updates @i_size
 * for appending writes and releases @ui->ui_mutex taken in
 * 'ubifs_write_begin()'.  A short copy into a page we skipped reading is
 * handled by cancelling the budget and forcing the VFS to repeat the write.
 */
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * The VFS copied less data to the page than it declared in
		 * its '->write_begin()' call via @len.  If the page was not
		 * up-to-date and @len was %PAGE_SIZE, 'ubifs_write_begin()'
		 * skipped reading it from the media (optimization), so part
		 * of the page now contains garbage.  Read the page in now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force the VFS to repeat the whole operation, or
		 * the error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		/* First write into this page - attach the budget marker */
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * We do not set @I_DIRTY_PAGES (which means that the inode has
		 * dirty pages) here - this has been done by
		 * '__set_page_dirty_nobuffers()' above.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}
596
597
598
599
600
601
602
603
604
605
/*
 * populate_page - copy bulk-read data nodes into a page.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information (zbranches and pre-read node buffer)
 * @n: next zbranch slot to consume; updated on return
 *
 * Fills @page block by block from the data nodes in @bu->buf, zero-filling
 * holes, and marks the page up-to-date.  Returns %0 on success and a
 * negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		/* The page lies entirely beyond @i_size - it is a hole */
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			/* Ran out of data nodes - the rest is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			/* Data node offsets are relative to the first one */
			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (IS_ENCRYPTED(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen, page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			/* Zero-pad a partially filled block */
			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			/* Data node for an earlier block - skip it */
			nn += 1;
			continue;
		} else {
			/* No data node for this block - it is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		/* Last page: zero anything read beyond @i_size */
		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
708
709
710
711
712
713
714
715
716
/*
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read (locked by the caller)
 *
 * Reads a run of consecutive data nodes in one go and populates @page1 plus
 * as many following page-cache pages as the read covered.  Errors are
 * deliberately non-fatal: they only cancel the bulk-read, the caller falls
 * back to the normal read path.  Returns %1 if the bulk-read was done
 * (@page1 is unlocked), otherwise %0.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and
		 * the blocks for the first page we are looking for are not
		 * together.  If all the pages were like this, bulk-read would
		 * become slower than normal read, so switch to normal read.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate the bulk-read buffer sized to exactly the
			 * span of data nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		/* FGP_NOWAIT: never block on further pages, just stop */
		page = pagecache_get_page(mapping, page_offset,
				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
				 ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
817
818
819
820
821
822
823
824
825
826
/*
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read
 *
 * Some flash media are capable of reading sequentially at faster rates.
 * UBIFS' bulk-read facility takes advantage of this by reading consecutive
 * data nodes in one go once it detects a sequential read pattern
 * (@read_in_a_row reaching 3 consecutive pages).
 *
 * Returns %1 if a bulk-read was done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read state is protected by @ui->ui_mutex, but bulk-read is
	 * only an optimization, so do not bother waiting for the mutex -
	 * just skip the bulk-read if it is contended.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, use the pre-allocated bulk-read information in
	 * @c->bu (protected by @c->bu_mutex); otherwise fall back to a
	 * temporary allocation.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
892
/*
 * ubifs_readpage - VFS '->readpage()' callback.
 *
 * Tries the bulk-read path first; 'ubifs_bulk_read()' unlocks the page
 * itself when it succeeds.  Otherwise falls back to a normal single-page
 * read and unlocks the page here.
 */
static int ubifs_readpage(struct file *file, struct page *page)
{
	int bulk_done = ubifs_bulk_read(page);

	if (!bulk_done) {
		do_readpage(page);
		unlock_page(page);
	}
	return 0;
}
901
/*
 * do_writepage - write a page worth of data to the media.
 * @page: locked, dirty-cleared page to write
 * @len: number of bytes to write (less than %PAGE_SIZE for the last page)
 *
 * Writes the page's data blocks through the journal, releases the page's
 * budget and ends writeback; the page is unlocked on return.  On error the
 * file-system is switched to R/O mode via 'ubifs_ro_mode()'.  Returns %0 on
 * success and a negative error code on failure.
 */
static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	/* Pages beyond @synced_i_size must never be written (see
	 * 'ubifs_writepage()') */
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	/* The page must carry the budget marker; release its budget now */
	ubifs_assert(c, PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	detach_page_private(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
/*
 * ubifs_writepage - VFS '->writepage()' callback.
 *
 * UBIFS tracks how much of the inode has been written to the media in
 * @ui->synced_i_size.  Before writing a data page which lies beyond the
 * synchronized inode size, the inode itself is written out first -
 * otherwise an unclean reboot could leave data nodes on the media beyond
 * the on-flash inode size.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(c, PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer
			 * has not yet been synchronized, so in case of an
			 * unclean reboot we may end up with some pages beyond
			 * the inode size, but they would be in the journal
			 * (because commit flushes write buffers) and recovery
			 * would deal with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size.  It must be zeroed out on each and
	 * every writepage invocation because it may be mmapped: a file is
	 * mapped in multiples of the page size; for a file that is not a
	 * multiple of the page size, the remaining memory is zeroed when
	 * mapped, and writes to that region are not written out to the file.
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
1071
1072
1073
1074
1075
1076
1077static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1078{
1079 if (attr->ia_valid & ATTR_UID)
1080 inode->i_uid = attr->ia_uid;
1081 if (attr->ia_valid & ATTR_GID)
1082 inode->i_gid = attr->ia_gid;
1083 if (attr->ia_valid & ATTR_ATIME)
1084 inode->i_atime = attr->ia_atime;
1085 if (attr->ia_valid & ATTR_MTIME)
1086 inode->i_mtime = attr->ia_mtime;
1087 if (attr->ia_valid & ATTR_CTIME)
1088 inode->i_ctime = attr->ia_ctime;
1089 if (attr->ia_valid & ATTR_MODE) {
1090 umode_t mode = attr->ia_mode;
1091
1092 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1093 mode &= ~S_ISGID;
1094 inode->i_mode = mode;
1095 }
1096}
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
/*
 * do_truncation - truncate an inode to a smaller size.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * Implements the '->setattr()' operation for truncations to a smaller
 * size.  Returns %0 in case of success and a negative error code in case
 * of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If the truncation is not block-aligned, the last block has to be
	 * re-written, so budget for one dirtied page.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for the truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow
		 * them, even if the budget forecasts %-ENOSPC - deleting
		 * data is how space gets freed in the first place.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * The page containing the new end of the file
				 * is dirty, so it carries a budget
				 * (@PG_private) and has to be written out so
				 * that 'ubifs_jnl_truncate()' below operates
				 * on up-to-date on-media data.
				 */
				ubifs_assert(c, PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					/*
					 * With multiple blocks per page,
					 * write the whole tail of the page,
					 * not only the last block.
					 */
					offset = new_size &
						 (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * Note: 'do_writepage()' unlocks the page
				 * itself, so no 'unlock_page()' here.
				 */
			} else {
				/*
				 * The page is clean: the media already holds
				 * the data, and the truncation node written
				 * below describes the change by itself.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/*
		 * The unbudgeted deletion freed space - clear the "no space"
		 * flags so that further budgeting attempts are retried.
		 */
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
/*
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * Implements the '->setattr()' operation for all cases except truncations
 * to a smaller size (see 'do_truncation()').  Returns %0 in case of
 * success and a negative error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * The inode length changed, so @I_DIRTY_DATASYNC must be set
		 * to make sure a subsequent fdatasync writes the inode.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	/* If the inode was dirty already, its budget was charged earlier */
	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}
1259
/*
 * ubifs_setattr - VFS '->setattr()' operation.
 *
 * Dispatches truncations to a smaller size to 'do_truncation()' and
 * everything else (including size extensions) to 'do_setattr()'.
 */
int ubifs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	/*
	 * NOTE(review): passes &init_user_ns rather than @mnt_userns -
	 * looks like idmapped mounts are not supported here; confirm
	 * before changing.
	 */
	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller file size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}
1289
/*
 * ubifs_invalidate_folio - VFS '->invalidate_folio()' callback.
 *
 * A dirty UBIFS folio carries a budget (@PG_private / @PG_checked); when
 * the whole folio is invalidated (e.g. by truncation) that budget has to
 * be released, since 'do_writepage()' will never run for it.
 */
static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, folio_test_private(folio));
	if (offset || length < folio_size(folio))
		/* Partial folio remains dirty - keep the budget */
		return;

	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
}
1310
/*
 * ubifs_fsync - VFS '->fsync()' operation: flush dirty pages, the inode
 * (unless this is a pure datasync) and any write-buffers holding nodes of
 * this inode.
 */
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer.
	 * Flush them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357static inline int mctime_update_needed(const struct inode *inode,
1358 const struct timespec64 *now)
1359{
1360 if (!timespec64_equal(&inode->i_mtime, now) ||
1361 !timespec64_equal(&inode->i_ctime, now))
1362 return 1;
1363 return 0;
1364}
1365
1366
1367
1368
1369
1370
1371
/*
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: timespec structure to assign
 * @flags: %S_ATIME / %S_CTIME / %S_MTIME flags selecting which time fields
 *	   of @inode to update
 *
 * Budgets the inode change, applies the selected time-stamps under
 * @ui->ui_mutex and marks the inode dirty.  Returns %0 on success and a
 * negative error code on failure.
 */
int ubifs_update_time(struct inode *inode, struct timespec64 *time,
		      int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int err, release;

	/* Without atime support, fall back to the generic implementation */
	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		return generic_update_time(inode, time, flags);

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	release = ui->dirty;
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
	mutex_unlock(&ui->ui_mutex);
	/* If the inode was dirty already, its budget was charged earlier */
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}
1403
1404
1405
1406
1407
1408
1409
1410
1411
/*
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * Updates the mtime and ctime of @inode if they differ from the current
 * time, budgeting the inode change first.  Returns %0 in case of success
 * and a negative error code in case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		/* Inode already dirty: its budget was charged earlier */
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}
1438
1439static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
1440{
1441 int err = update_mctime(file_inode(iocb->ki_filp));
1442 if (err)
1443 return err;
1444
1445 return generic_file_write_iter(iocb, from);
1446}
1447
1448static bool ubifs_dirty_folio(struct address_space *mapping,
1449 struct folio *folio)
1450{
1451 bool ret;
1452 struct ubifs_info *c = mapping->host->i_sb->s_fs_info;
1453
1454 ret = filemap_dirty_folio(mapping, folio);
1455
1456
1457
1458
1459 ubifs_assert(c, ret == false);
1460 return ret;
1461}
1462
1463#ifdef CONFIG_MIGRATION
/*
 * ubifs_migrate_page - migrate a page-cache page to a new physical page.
 *
 * Like the generic page migration, but also moves UBIFS' private
 * "budgeted" marker (page private data, value 1) from the old page to the
 * new one so the budget accounting stays attached to the right page.
 */
static int ubifs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (PagePrivate(page)) {
		/* Transfer the budget marker to the new page */
		detach_page_private(page);
		attach_page_private(newpage, (void *)1);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
1484#endif
1485
/*
 * ubifs_releasepage - VFS '->releasepage()' callback.
 *
 * UBIFS pages with private data are budgeted and should be released only
 * through 'do_writepage()' or '->invalidate_folio()'.  Being called here
 * for such a page is unexpected, hence the unconditional
 * 'ubifs_assert(c, 0)' (a loud debug-time complaint); the page is still
 * cleaned up so the release can proceed.
 */
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	/*
	 * An attempt to release a dirty page without budgeting for it -
	 * should not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(c, PagePrivate(page));
	ubifs_assert(c, 0);
	detach_page_private(page);
	ClearPageChecked(page);
	return 1;
}
1503
1504
1505
1506
1507
/*
 * ubifs_vm_page_mkwrite - an mmap()ed page is taking a write-protection
 * fault and is being made writable.  UBIFS must ensure the page is
 * budgeted for before it may be dirtied.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far, so we may budget for changing the
	 * page.  Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back, which would deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so
	 * we assume that it is not and budget for a new page.  We could look
	 * at the @PG_private flag and figure this out, but we may race with
	 * write-back and the page state may change by the time we lock it,
	 * so this would need additional care.  We do not bother, because it
	 * is a minor issue - any surplus budget is released below once the
	 * page state is known.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * The inode time stamps will be changed too, which requires
		 * extra budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		goto sigbus;
	}

	if (PagePrivate(page))
		/* The page is dirty already, so it was budgeted twice */
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			/* Existing data: "new page" -> "dirtied page" budget */
			ubifs_convert_page_budget(c);
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

sigbus:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	return VM_FAULT_SIGBUS;
}
1596
/* VM operations for mmap()ed UBIFS files - generic faults, budgeted writes */
static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};
1602
1603static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1604{
1605 int err;
1606
1607 err = generic_file_mmap(file, vma);
1608 if (err)
1609 return err;
1610 vma->vm_ops = &ubifs_file_vm_ops;
1611
1612 if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
1613 file_accessed(file);
1614
1615 return 0;
1616}
1617
1618static const char *ubifs_get_link(struct dentry *dentry,
1619 struct inode *inode,
1620 struct delayed_call *done)
1621{
1622 struct ubifs_inode *ui = ubifs_inode(inode);
1623
1624 if (!IS_ENCRYPTED(inode))
1625 return ui->data;
1626
1627 if (!dentry)
1628 return ERR_PTR(-ECHILD);
1629
1630 return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
1631}
1632
1633static int ubifs_symlink_getattr(struct user_namespace *mnt_userns,
1634 const struct path *path, struct kstat *stat,
1635 u32 request_mask, unsigned int query_flags)
1636{
1637 ubifs_getattr(mnt_userns, path, stat, request_mask, query_flags);
1638
1639 if (IS_ENCRYPTED(d_inode(path->dentry)))
1640 return fscrypt_symlink_getattr(path, stat);
1641 return 0;
1642}
1643
/* Page-cache (address space) operations for regular UBIFS files */
const struct address_space_operations ubifs_file_address_operations = {
	.readpage = ubifs_readpage,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidate_folio = ubifs_invalidate_folio,
	.dirty_folio = ubifs_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migratepage = ubifs_migrate_page,
#endif
	.releasepage = ubifs_releasepage,
};
1656
/* Inode operations for regular UBIFS files */
const struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
	.listxattr = ubifs_listxattr,
	.update_time = ubifs_update_time,
	.fileattr_get = ubifs_fileattr_get,
	.fileattr_set = ubifs_fileattr_set,
};
1665
/* Inode operations for UBIFS symlinks (fscrypt-aware link resolution) */
const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link = ubifs_get_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_symlink_getattr,
	.listxattr = ubifs_listxattr,
	.update_time = ubifs_update_time,
};
1673
/* File operations for regular UBIFS files */
const struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = ubifs_write_iter,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};
1688