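/*
 * This file is part of UBIFS.
 *
 * This file implements VFS file and inode operations for regular files and
 * symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()',
 * because it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 */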
#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

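/**
 * read_block - read and decompress one data block of an inode.
 * @inode: inode the block belongs to
 * @addr: buffer to read the block into
 * @block: block number to read
 * @dn: buffer to read the data node into
 *
 * This function looks the block up in the TNC, decompresses it into @addr,
 * and zero-pads the tail if the block is shorter than UBIFS_BLOCK_SIZE.
 * Returns %0 on success, %-ENOENT if the block is a hole (@addr is zeroed in
 * this case), and other negative error codes on failure.
 */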
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn);
	return -EINVAL;
}

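/*
 * do_readpage - read and decompress a whole page, block by block,
 * zero-filling holes and the region beyond @i_size. On success the page is
 * marked up-to-date, on failure its error flag is set.
 */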
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		struct ubifs_info *c = inode->i_sb->s_fs_info;

		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

	ubifs_release_budget(c, &req);
}

static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * In the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages
	 * and deadlock if we had the page locked. At this point we do not
	 * know anything about the page, so assume that this is a new page
	 * which is written to a hole. This corresponds to the largest budget.
	 * Later the budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the @PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media,
		 * so the change does not add any new indexing information.
		 * Convert the budget we allocated for a new page into the
		 * cheaper budget of a changed page.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if we are appending pages, and unlocked otherwise. This is
		 * an optimization (atomically updates @i_size and @ui_size).
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns
 * zero in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist on
			 * the media, so changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new indexing
			 * information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into two parts - slow and
 * fast.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for it; if the
 *       page does not exist on the media (a hole), we have to budget for a
 *       new page; otherwise, we may budget for changing an existing page; the
 *       difference between these cases is that changing an existing page does
 *       not introduce anything new to the FS indexing information, so it does
 *       not grow, and a smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired earlier, when the page was marked dirty.
 *
 * The UBIFS budgeting sub-system may force write-back if it thinks there is
 * no space to reserve. This imposes some locking restrictions and makes it
 * impossible to take the above cases into account up front.
 *
 * The solution is that the fast path of 'ubifs_write_begin()' assumes there
 * is plenty of flash space and the budget will be acquired quickly, without
 * forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page, so there is no need to
			 * load it. But we do not know whether this page
			 * exists on the media or not, so we assume the latter
			 * because it requires the larger budget.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall back to the slow path.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

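/*
 * ubifs_write_end - VFS '->write_end()' callback. If the VFS copied less data
 * than it declared in '->write_begin()', the page is re-read and %0 is
 * returned to make the VFS repeat the operation (or the error code if the
 * re-read fails); otherwise the page is marked dirty and, when appending, the
 * in-memory inode size is updated.
 */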
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_SIZE, 'ubifs_write_begin()' did not load it from the
		 * media (for optimization reasons). This means that part of
		 * the page contains garbage, so read the page now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(c, &dn->data, dlen, addr,
					       &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * Returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and
		 * the blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * become extremely inefficient, so we turn it off and fall
		 * back to standard read.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many
			 * data nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read attempts to read in one go consecutive data nodes that are also
 * located consecutively in the same LEB.
 *
 * Returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}

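/*
 * ubifs_readpage - VFS '->readpage()' callback. Tries bulk-read first and
 * falls back to reading a single page. Always returns %0 - read errors are
 * reported via the page error flag.
 */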
static int ubifs_readpage(struct file *file, struct page *page)
{
	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	unlock_page(page);
	return 0;
}

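/*
 * do_writepage - write the first @len bytes of a locked page to the journal,
 * one UBIFS block at a time, then release the page's budget and clear its
 * private state.
 */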
static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}

/*
 * When writing back dirty inodes, VFS first writes back pages belonging to
 * the inode, then the inode itself. For UBIFS this may cause a problem.
 * Consider a situation when we have an inode with size 0, then a megabyte of
 * data is appended to the inode, write-back starts and flushes some of the
 * dirty pages, the journal becomes full, a commit happens and finishes, and
 * then an unclean reboot happens. When the file system is mounted next time,
 * the inode size would still be 0, but there would be many pages which are
 * beyond the inode size - they would be indexed and consume flash space.
 * Because the journal has been committed, replay would not be able to detect
 * this situation and correct the inode size. This means UBIFS would have to
 * scan the whole index and correct all inode sizes, which is long and
 * unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would
			 * deal with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns %0 in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns %0 in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

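/*
 * ubifs_setattr - VFS '->setattr()' callback. Truncations to a smaller size
 * are handled by 'do_truncation()', everything else by 'do_setattr()'.
 */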
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

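/*
 * ubifs_invalidatepage - VFS '->invalidatepage()' callback. Called when a
 * dirty page is invalidated (e.g., by truncation): releases the page's
 * budget and clears its private state. Partial-page invalidations are
 * ignored because the page remains dirty.
 */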
static void ubifs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset || length < PAGE_SIZE)
		/* Partial page remains dirty */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}

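/*
 * ubifs_fsync - VFS '->fsync()' callback. Writes back dirty pages, writes
 * the inode unless this is a pure data sync, and flushes the write-buffers
 * which may still hold nodes belonging to this inode.
 */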
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer.
	 * Flush them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If the current values of the time-stamps are within the UBIFS inode
 * time granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec *now)
{
	if (!timespec_equal(&inode->i_mtime, now) ||
	    !timespec_equal(&inode->i_ctime, now))
		return 1;
	return 0;
}

#ifdef CONFIG_UBIFS_ATIME_SUPPORT
/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: new time
 * @flags: which time fields to update (%S_ATIME, %S_CTIME, %S_MTIME)
 *
 * This function updates the time of the inode.
 */
int ubifs_update_time(struct inode *inode, struct timespec *time,
		      int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int iflags = I_DIRTY_TIME;
	int err, release;

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME))
		iflags |= I_DIRTY_SYNC;

	release = ui->dirty;
	__mark_inode_dirty(inode, iflags);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}
#endif

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they are not
 * equivalent to the current time. Returns %0 in case of success and a
 * negative error code in case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

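/*
 * ubifs_write_iter - VFS '->write_iter()' callback. Updates mtime/ctime
 * first and then delegates to 'generic_file_write_iter()'.
 */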
static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

static int ubifs_set_page_dirty(struct page *page)
{
	int ret;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(ret == 0);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int ubifs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		SetPagePrivate(newpage);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a dirty page without budgeting for it - should
	 * not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable. UBIFS must ensure the page is properly budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS;

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be a good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.map_pages    = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
	file_accessed(file);
#endif
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.readpage       = ubifs_readpage,
	.writepage      = ubifs_writepage,
	.write_begin    = ubifs_write_begin,
	.write_end      = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage    = ubifs_migrate_page,
#endif
	.releasepage    = ubifs_releasepage,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
	.listxattr   = ubifs_listxattr,
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
	.update_time = ubifs_update_time,
#endif
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.readlink    = generic_readlink,
	.get_link    = simple_get_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
	.listxattr   = ubifs_listxattr,
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
	.update_time = ubifs_update_time,
#endif
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read_iter      = generic_file_read_iter,
	.write_iter     = ubifs_write_iter,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = generic_file_splice_read,
	.splice_write   = iter_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};