1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include "ubifs.h"
53#include <linux/mount.h>
54#include <linux/slab.h>
55#include <linux/migrate.h>
56
/**
 * read_block - read a single UBIFS data block into a buffer.
 * @inode: inode the block belongs to
 * @addr: destination buffer (at least %UBIFS_BLOCK_SIZE bytes)
 * @block: data block number within the inode
 * @dn: scratch buffer for the on-flash data node
 *
 * Looks the data node up in the TNC, optionally decrypts it, decompresses it
 * into @addr and zero-pads the tail up to %UBIFS_BLOCK_SIZE.  Returns zero on
 * success, %-ENOENT if the block is a hole (in which case @addr is zeroed),
 * and %-EINVAL if the data node is corrupted.
 */
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole - zero the block */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data).  Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn);
	return -EINVAL;
}
110
/**
 * do_readpage - read a page worth of data from the media.
 * @page: page to read (locked, kmapped by this function)
 *
 * Reads %UBIFS_BLOCKS_PER_PAGE data blocks into the page.  Blocks beyond the
 * end of the file and blocks which have no data node in the TNC are
 * zero-filled (holes); a fully hole page gets @PG_checked set.  Returns zero
 * on success and a negative error code on failure.
 */
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode size - a hole */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode size */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				/*
				 * This is the last block of the file: zero
				 * out anything the data node carries past
				 * the @i_size tail.
				 */
				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		struct ubifs_info *c = inode->i_sb->s_fs_info;
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
199
200
201
202
203
204
205
206
207static void release_new_page_budget(struct ubifs_info *c)
208{
209 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
210
211 ubifs_release_budget(c, &req);
212}
213
214
215
216
217
218
219
220
221static void release_existing_page_budget(struct ubifs_info *c)
222{
223 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
224
225 ubifs_release_budget(c, &req);
226}
227
/**
 * write_begin_slow - "slow" path of 'ubifs_write_begin()'.
 * @mapping: address space object
 * @pos: position in the file to write to
 * @len: length to write
 * @pagep: the locked page is returned here
 * @flags: AOP flags passed on to 'grab_cache_page_write_begin()'
 *
 * Unlike the fast path, the budget is allocated *before* the page is locked,
 * so budgeting may safely cause write-back.  Once the page state is known,
 * any surplus budget is released or converted.  Returns zero on success and
 * a negative error code on failure.
 */
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At this point we do not know whether the page is dirty or not, so
	 * we assume the worst and budget for a new page (@req.new_page above).
	 * The surplus is released further down once the page flags have been
	 * inspected.
	 */
	if (appending)
		/* We are appending data, budget for the inode change too */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		/* Whole-page overwrite: no need to read it from the media */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the @PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * The page is not dirty and was read from the media, so it is
		 * an "existing" page: turn the "new page" budget we took into
		 * a "dirtied page" budget.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * @ui->ui_mutex serializes file size changes.  It is taken
		 * here and, when appending, released later in
		 * 'ubifs_write_end()'.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is already dirty, so its change was
			 * budgeted before - free the inode budget we took
			 * above.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write extends the file size
 *
 * Fast-path budgeting helper for 'ubifs_write_begin()': @req.fast makes
 * 'ubifs_budget_space()' fail quickly instead of forcing write-back, so the
 * caller can fall back to 'write_begin_slow()'.
 *
 * Note: on the paths that take @ui->ui_mutex, the mutex is deliberately left
 * locked on return - it is unlocked later by 'ubifs_write_end()' (or by the
 * error path of 'ubifs_write_begin()').
 *
 * Returns zero in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty (already budgeted) and we are not
			 * appending, which means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the
			 * inode has to be marked as dirty - but it already
			 * is, so no budget is needed.  The mutex stays held
			 * until the write finishes.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, and the inode is
		 * clean - budget for the inode change only.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * @PG_checked means the page has no on-flash data yet
			 * (a hole or past end of file) - budget for a new
			 * page.
			 */
			req.new_page = 1;
		else
			/*
			 * The page exists on the media and is not dirty -
			 * budget for dirtying an existing page.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
/*
 * ubifs_write_begin - VFS '->write_begin()' operation.
 *
 * Tries the "fast" path first: lock the page, then budget with @req.fast set
 * (see 'allocate_budget()').  If budgeting fails, everything is dropped and
 * 'write_begin_slow()' re-does the work with budgeting done before the page
 * lock is taken, because budgeting may cause write-back which would deadlock
 * on the locked page.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page so no need to load it.  But
			 * we do not know whether this page exists on the media
			 * or not, so we assume the latter because it requires
			 * a larger budget.  The assumption is that it is
			 * better to budget a bit more than to read the page
			 * from the media.  Thus, we are setting @PG_checked
			 * here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date - undo the
		 * flags we set above.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back, but write-back cannot be done while the page
		 * is locked.  Unlock and release everything and fall back to
		 * the slow path, which budgets before locking the page.
		 * 'allocate_budget()' may have left @ui->ui_mutex locked when
		 * appending - unlock it here.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forced write-back.  Note, if we
	 * are appending, @ui->ui_mutex is still locked - it will be unlocked
	 * in 'ubifs_write_end()'.
	 */
	*pagep = page;
	return 0;

}
520
521
522
523
524
525
526
527
528
529
530
/**
 * cancel_budget - cancel budget taken in 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page for which the budget should be cancelled
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write was an append
 *
 * Helper for 'ubifs_write_end()': unlocks @ui->ui_mutex when appending
 * (releasing the inode budget if the inode is still clean) and gives back
 * the page budget unless the page is already dirty (@PG_private set).
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
546
/*
 * ubifs_write_end - VFS '->write_end()' operation, the counterpart of
 * 'ubifs_write_begin()'.  Marks the page dirty, updates @i_size when
 * appending and unlocks @ui->ui_mutex which 'ubifs_write_begin()' left held
 * for appends.
 */
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument.  Because 'ubifs_write_begin()' skipped reading
		 * the page in the whole-page case, part of the page may now
		 * contain garbage.  Cancel the budget and read the page in,
		 * then return 0 so VFS repeats the whole operation (or the
		 * 'do_readpage()' error code if reading failed).
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		/* First time the page is dirtied - account for it */
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages) - that has been done by
		 * '__set_page_dirty_nobuffers()' above.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}
607
608
609
610
611
612
613
614
615
616
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information (zbranches and the read buffer)
 * @n: next zbranch slot to consume; updated on return
 *
 * Decompresses (and, for encrypted inodes, decrypts) the data nodes from the
 * bulk-read buffer that belong to @page.  Blocks without a data node are
 * treated as holes and zero-filled, and a page containing a hole gets
 * @PG_checked set.  Returns %0 on success and %-EINVAL if a bad data node is
 * encountered.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		/* The page is fully beyond @i_size - a hole */
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			/* Ran out of data nodes - the rest is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (ubifs_crypt_is_encrypted(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen, page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			/* Zero-pad a partially filled block */
			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			/* Zbranch for an earlier block - skip it */
			nn += 1;
			continue;
		} else {
			/* No data node for this block - a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		/* Last page of the file: zero out data read past @i_size */
		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
719
720
721
722
723
724
725
726
727
/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * Reads a run of consecutive data nodes in one go and populates @page1 plus
 * following pages from the page cache.  This function returns %1 if the
 * bulk-read is done, otherwise %0 is returned (and the caller falls back to
 * ordinary reading).
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and
		 * the blocks for the first page we are looking for are not
		 * together.  If all the pages were like this, bulk-read would
		 * become slower than normal reading, so we turn it off for a
		 * while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate a bulk-read buffer big enough for all the
			 * data nodes found by 'ubifs_tnc_get_bu_keys()'.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	/* Populate the following pages, read-ahead style */
	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
826
827
828
829
830
831
832
833
834
835
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Bulk-read is switched on after three sequential page reads in a row
 * (tracked via @ui->read_in_a_row) and switched off again when reading stops
 * being sequential.  This function returns %1 if a bulk-read is done and %0
 * otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read state is protected by @ui->ui_mutex, but bulk-read is
	 * only an optimization, so do not bother if the mutex cannot be
	 * taken right away.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Not a sequential read - turn off bulk-read */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three sequential reads in a row - switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * Prefer the pre-allocated per-FS bulk-read info (@c->bu, protected
	 * by @c->bu_mutex); fall back to a temporary allocation if it is
	 * busy.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
901
/*
 * ubifs_readpage - VFS '->readpage()' operation.  Tries a bulk-read first;
 * falls back to reading the single page if bulk-read was not done (in which
 * case the page must be unlocked here).
 */
static int ubifs_readpage(struct file *file, struct page *page)
{
	if (!ubifs_bulk_read(page)) {
		do_readpage(page);
		unlock_page(page);
	}
	return 0;
}
910
/**
 * do_writepage - write a page of data to the flash media.
 * @page: locked page to write
 * @len: number of bytes of the page to write
 *
 * Writes @len bytes of the page block-by-block as data nodes through the
 * journal, then releases the page budget and clears @PG_private and
 * @PG_checked.  On a write error the page error flag is set and the
 * file-system is switched to R/O mode.  Returns zero on success and a
 * negative error code on failure.
 */
static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	/* Pages beyond @synced_i_size must never be written back */
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(PagePrivate(page));
	if (PageChecked(page))
		/* The page did not exist on the media - new page budget */
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
/*
 * ubifs_writepage - VFS '->writepage()' operation.
 *
 * UBIFS writes the inode to the media before writing back data pages that
 * lie beyond the on-media inode size (@ui->synced_i_size) - see the
 * 'write_inode()' calls below.  NOTE(review): presumably this ordering
 * prevents stale data past end-of-file from becoming visible after an
 * unclean reboot - confirm against the UBIFS design documentation.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			/*
			 * The page is past the synchronized inode size -
			 * write the inode first.
			 */
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size.  Zero out the part beyond @i_size
	 * before writing, so only @len valid bytes are written back.
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
1079
1080
1081
1082
1083
1084
1085static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1086{
1087 if (attr->ia_valid & ATTR_UID)
1088 inode->i_uid = attr->ia_uid;
1089 if (attr->ia_valid & ATTR_GID)
1090 inode->i_gid = attr->ia_gid;
1091 if (attr->ia_valid & ATTR_ATIME)
1092 inode->i_atime = timespec_trunc(attr->ia_atime,
1093 inode->i_sb->s_time_gran);
1094 if (attr->ia_valid & ATTR_MTIME)
1095 inode->i_mtime = timespec_trunc(attr->ia_mtime,
1096 inode->i_sb->s_time_gran);
1097 if (attr->ia_valid & ATTR_CTIME)
1098 inode->i_ctime = timespec_trunc(attr->ia_ctime,
1099 inode->i_sb->s_time_gran);
1100 if (attr->ia_valid & ATTR_MODE) {
1101 umode_t mode = attr->ia_mode;
1102
1103 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1104 mode &= ~S_ISGID;
1105 inode->i_mode = mode;
1106 }
1107}
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is
 * truncated to a smaller size.  Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size which does not fall on a
	 * block boundary, the last data block will have to be re-written, so
	 * budget for a dirtied page.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;
	/* The inode itself is changed as well */
	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Truncation to zero is allowed to proceed even when there is
		 * no space (%-ENOSPC): any other failure is returned to the
		 * caller.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		/* The new size falls inside a data block */
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
			} else {
				/*
				 * The page is clean - 'ubifs_jnl_truncate()'
				 * will read the last block from the media
				 * itself, so just drop the page.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* We ran un-budgeted - clear the no-space flags */
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to a smaller size.  Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	/* If the inode was dirty already, the budget was not consumed */
	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}
1270
1271int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
1272{
1273 int err;
1274 struct inode *inode = d_inode(dentry);
1275 struct ubifs_info *c = inode->i_sb->s_fs_info;
1276
1277 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1278 inode->i_ino, inode->i_mode, attr->ia_valid);
1279 err = setattr_prepare(dentry, attr);
1280 if (err)
1281 return err;
1282
1283 err = dbg_check_synced_i_size(c, inode);
1284 if (err)
1285 return err;
1286
1287 err = fscrypt_prepare_setattr(dentry, attr);
1288 if (err)
1289 return err;
1290
1291 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1292
1293 err = do_truncation(c, inode, attr);
1294 else
1295 err = do_setattr(c, inode, attr);
1296
1297 return err;
1298}
1299
/*
 * ubifs_invalidatepage - VFS '->invalidatepage()' operation.  Gives back the
 * budget of a fully invalidated dirty page; partial invalidation leaves the
 * page dirty and does nothing.
 */
static void ubifs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset || length < PAGE_SIZE)
		/* Partial page remains dirty */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}
1320
/*
 * ubifs_fsync - VFS '->fsync()' operation: write back the data range, then
 * the inode (unless this is a pure datasync), then flush the write-buffers.
 */
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * VFS may still call 'fsync()' on an R/O mounted
		 * file-system - nothing to do in that case.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer.
	 * Push them to the media.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367static inline int mctime_update_needed(const struct inode *inode,
1368 const struct timespec *now)
1369{
1370 if (!timespec_equal(&inode->i_mtime, now) ||
1371 !timespec_equal(&inode->i_ctime, now))
1372 return 1;
1373 return 0;
1374}
1375
1376#ifdef CONFIG_UBIFS_ATIME_SUPPORT
1377
1378
1379
1380
1381
1382
/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: new time value
 * @flags: selects which time stamps to set (%S_ATIME, %S_CTIME, %S_MTIME)
 *
 * This function updates the time stamps of the inode, budgeting for the
 * resulting inode change.  The budget is released again if the inode was
 * already dirty.  Returns zero on success and a negative error code if
 * budgeting fails.
 */
int ubifs_update_time(struct inode *inode, struct timespec *time,
			     int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int iflags = I_DIRTY_TIME;
	int err, release;

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	/* Without lazytime, the time change must be written out */
	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		iflags |= I_DIRTY_SYNC;

	release = ui->dirty;
	__mark_inode_dirty(inode, iflags);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}
1415#endif
1416
1417
1418
1419
1420
1421
1422
1423
1424
/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if it is not equivalent
 * to the current time, budgeting for the inode change first.  Returns zero
 * in case of success and a negative error code in case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		/* If the inode was dirty already, give the budget back */
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}
1451
1452static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
1453{
1454 int err = update_mctime(file_inode(iocb->ki_filp));
1455 if (err)
1456 return err;
1457
1458 return generic_file_write_iter(iocb, from);
1459}
1460
/*
 * ubifs_set_page_dirty - VFS '->set_page_dirty()' operation.
 */
static int ubifs_set_page_dirty(struct page *page)
{
	int ret;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(ret == 0);
	return ret;
}
1473
1474#ifdef CONFIG_MIGRATION
/*
 * ubifs_migrate_page - page-migration callback.  Moves the page-cache
 * mapping, transfers the @PG_private budget marker to the new page, and
 * copies the page contents (or only the page state for
 * %MIGRATE_SYNC_NO_COPY).
 */
static int ubifs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Carry the dirty-page budget marker over to the new page */
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
1495#endif
1496
/*
 * ubifs_releasepage - VFS '->releasepage()' operation.
 */
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a dirty page without budgeting for it -
	 * should not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}
1511
1512
1513
1514
1515
/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure the page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far, so we may budget for changing the
	 * page.  Note, we cannot do this after we lock the page, because
	 * budgeting may cause write-back which would deadlock on the locked
	 * page.  At the moment we do not know whether the page is dirty or
	 * not, so we assume that it is not and budget for a new page; any
	 * surplus is released once the page is locked below.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* The page got truncated or invalidated under us */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		/* The page is already dirty and budgeted - give ours back */
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			/* Existing page - convert new-page budget */
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		/* If the inode was dirty already, give the budget back */
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}
1607
/* VM operations for mmap()ed UBIFS files */
static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};
1613
1614static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1615{
1616 int err;
1617
1618 err = generic_file_mmap(file, vma);
1619 if (err)
1620 return err;
1621 vma->vm_ops = &ubifs_file_vm_ops;
1622#ifdef CONFIG_UBIFS_ATIME_SUPPORT
1623 file_accessed(file);
1624#endif
1625 return 0;
1626}
1627
/*
 * ubifs_get_link - VFS '->get_link()' operation: resolve the symlink target
 * stored in the inode data, decrypting it for encrypted inodes.
 */
static const char *ubifs_get_link(struct dentry *dentry,
					    struct inode *inode,
					    struct delayed_call *done)
{
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (!IS_ENCRYPTED(inode))
		return ui->data;

	/*
	 * No dentry - presumably an RCU-walk lookup; the fscrypt work cannot
	 * be done here, so ask VFS to retry in ref-walk mode.
	 */
	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}
1642
/* Address space operations for regular-file data pages */
const struct address_space_operations ubifs_file_address_operations = {
	.readpage = ubifs_readpage,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= ubifs_migrate_page,
#endif
	.releasepage = ubifs_releasepage,
};
1655
/* Inode operations for regular files */
const struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
	.listxattr = ubifs_listxattr,
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
	.update_time = ubifs_update_time,
#endif
};
1664
/* Inode operations for symlinks */
const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link = ubifs_get_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
	.listxattr = ubifs_listxattr,
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
	.update_time = ubifs_update_time,
#endif
};
1674
/* File operations for regular files */
const struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = ubifs_write_iter,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};
1689