/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', except
 * in some rare cases when the budget is allocated in 'ubifs_write_end()'.
 * Again, these are optimizations and the flag may be cleared.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, @i_mutex is not locked in 'ubifs_writepage()', which
 * means write-back may run concurrently with other VFS operations on the same
 * inode.
 */
#include "ubifs.h"
#include <linux/aio.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>

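/*
 * read_block - look up and decompress one data block of an inode.
 *
 * File data is addressed in UBIFS_BLOCK_SIZE blocks, so a single page may be
 * backed by several data nodes. A missing data node (-ENOENT) denotes a hole,
 * in which case the block is zero-filled.
 */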
static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
{
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err, len, out_len;
        union ubifs_key key;
        unsigned int dlen;

        data_key_init(c, &key, inode->i_ino, block);
        err = ubifs_tnc_lookup(c, &key, dn);
        if (err) {
                if (err == -ENOENT)
                        /* Not found, so it must be a hole */
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                return err;
        }

        ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
                     ubifs_inode(inode)->creat_sqnum);
        len = le32_to_cpu(dn->size);
        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                goto dump;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
        if (err || len != out_len)
                goto dump;

        /*
         * Data length can be less than a full block, even for blocks that are
         * not the last in the file (e.g., as a result of making a hole and
         * appending data). Ensure that the remainder is zeroed out.
         */
        if (len < UBIFS_BLOCK_SIZE)
                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

        return 0;

dump:
        ubifs_err("bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
        ubifs_dump_node(c, dn);
        return -EINVAL;
}

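/*
 * do_readpage - read a full page block-by-block using 'read_block()'. Blocks
 * beyond @i_size and holes are zero-filled, and the page is marked checked if
 * it turned out to be a hole, so that a later write budgets it as a new page.
 */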
static int do_readpage(struct page *page)
{
        void *addr;
        int err = 0, i;
        unsigned int block, beyond;
        struct ubifs_data_node *dn;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);
        ubifs_assert(!PageChecked(page));
        ubifs_assert(!PagePrivate(page));

        addr = kmap(page);

        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
        if (block >= beyond) {
                /* Reading beyond inode */
                SetPageChecked(page);
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out;
        }

        dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
        if (!dn) {
                err = -ENOMEM;
                goto error;
        }

        i = 0;
        while (1) {
                int ret;

                if (block >= beyond) {
                        /* Reading beyond inode */
                        err = -ENOENT;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else {
                        ret = read_block(inode, addr, block, dn);
                        if (ret) {
                                err = ret;
                                if (err != -ENOENT)
                                        break;
                        } else if (block + 1 == beyond) {
                                int dlen = le32_to_cpu(dn->size);
                                int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

                                if (ilen && ilen < dlen)
                                        memset(addr + ilen, 0, dlen - ilen);
                        }
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += UBIFS_BLOCK_SIZE;
        }
        if (err) {
                if (err == -ENOENT) {
                        /* Not found, so it must be a hole */
                        SetPageChecked(page);
                        dbg_gen("hole");
                        goto out_free;
                }
                ubifs_err("cannot read page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                goto error;
        }

out_free:
        kfree(dn);
out:
        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return 0;

error:
        kfree(dn);
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

        ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};

        ubifs_release_budget(c, &req);
}

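/**
 * write_begin_slow - '->write_begin()' slow path.
 * @mapping: address space mapping
 * @pos: position in the file to start writing to
 * @len: length of the write
 * @pagep: the result page is returned here
 * @flags: AOP flags passed on to 'grab_cache_page_write_begin()'
 *
 * This is the slow path of 'ubifs_write_begin()', taken when fast-path
 * budgeting fails. Here the budget is allocated before the page is locked,
 * which allows the budgeting code to force write-back. Returns zero in case
 * of success and a negative error code in case of failure.
 */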
static int write_begin_slow(struct address_space *mapping,
                            loff_t pos, unsigned len, struct page **pagep,
                            unsigned flags)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;

        dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
                inode->i_ino, pos, len, inode->i_size);

        /*
         * At the slow path we have to budget before locking the page, because
         * budgeting may force write-back, which would wait on locked pages
         * and deadlock if we had the page locked. At this point we do not
         * know anything about the page, so assume that this is a new page
         * which is written to a hole. This corresponds to the largest budget.
         * Later the budget will be amended if this is not true.
         */
        if (appending)
                /* We are appending data, budget for inode change */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err))
                return err;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page)) {
                ubifs_release_budget(c, &req);
                return -ENOMEM;
        }

        if (!PageUptodate(page)) {
                if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        if (PagePrivate(page))
                /*
                 * The page is dirty, which means it was budgeted twice:
                 *   o first time the budget was allocated by the task which
                 *     made the page dirty and set the PG_private flag;
                 *   o and then we budgeted for it for the second time at the
                 *     very beginning of this function.
                 *
                 * So what we have to do is to release the page budget we
                 * allocated.
                 */
                release_new_page_budget(c);
        else if (!PageChecked(page))
                /*
                 * We are changing a page which already exists on the media,
                 * so the amount of indexing information does not grow and the
                 * "new page" budget acquired above is larger than needed.
                 * Convert it to a "changed page" budget.
                 */
                ubifs_convert_page_budget(c);

        if (appending) {
                struct ubifs_inode *ui = ubifs_inode(inode);

                /*
                 * 'ubifs_write_end()' is optimized from the fast-path part of
                 * 'ubifs_write_begin()' and expects the @ui_mutex to be
                 * locked if the inode is appended to. So we have to lock it
                 * here.
                 */
                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The inode is dirty already, so we may free the
                         * budget we allocated.
                         */
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        *pagep = page;
        return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
                           struct ubifs_inode *ui, int appending)
{
        struct ubifs_budget_req req = { .fast = 1 };

        if (PagePrivate(page)) {
                if (!appending)
                        /*
                         * The page is dirty and we are not appending, which
                         * means no budget is needed at all.
                         */
                        return 0;

                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The page is dirty and we are appending, so the
                         * inode has to be marked as dirty. However, it is
                         * already dirty, so we do not need any budget. We may
                         * return, but @ui->ui_mutex has to remain locked
                         * because we should prevent write-back from flushing
                         * the inode and freeing the budget. The lock will be
                         * released in 'ubifs_write_end()'.
                         */
                        return 0;

                /*
                 * The page is dirty, we are appending, the inode is clean, so
                 * we need to budget the inode change.
                 */
                req.dirtied_ino = 1;
        } else {
                if (PageChecked(page))
                        /*
                         * The page corresponds to a hole or to a place beyond
                         * the inode size, so the amount of indexing
                         * information will grow: budget for a new page.
                         */
                        req.new_page = 1;
                else
                        /*
                         * Not a hole, the change will not add any new
                         * indexing information, budget for page change.
                         */
                        req.dirtied_page = 1;

                if (appending) {
                        mutex_lock(&ui->ui_mutex);
                        if (!ui->dirty)
                                /*
                                 * The inode is clean but we will have to mark
                                 * it as dirty because we are appending. This
                                 * needs a budget.
                                 */
                                req.dirtied_ino = 1;
                }
        }

        return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - the fast path
 * (this function) and the slow path ('write_begin_slow()').
 *
 * There are several budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - if the page does not exist on the
 *       media (a hole), we have to budget for a new page; otherwise, we may
 *       budget for changing an existing page; the difference between these
 *       cases is that changing an existing page does not introduce anything
 *       new to the FS indexing information, so it does not grow, and a
 *       smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired by the party which made the page dirty
 *       earlier.
 *
 * The UBIFS budgeting sub-system may force write-back if it thinks there is
 * no space to reserve. This imposes locking restrictions and makes writing to
 * the file-system slower, which is why the fast path budgets with the @fast
 * flag set (forbidding forced write-back) and falls back to
 * 'write_begin_slow()' if that fails.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        int skipped_read = 0;
        struct page *page;

        ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
        ubifs_assert(!c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return -EROFS;

        /* Try out the fast-path part first */
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page))
                return -ENOMEM;

        if (!PageUptodate(page)) {
                /* The page is not loaded from the flash */
                if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
                        /*
                         * We change the whole page so no need to load it. But
                         * we do not know whether this page exists on the
                         * media or not, so we assume the latter because it
                         * requires a larger budget. The assumption is that it
                         * is better to budget a bit more than to read the
                         * page from the media. Thus, we are setting the
                         * @PG_checked flag here.
                         */
                        SetPageChecked(page);
                        skipped_read = 1;
                } else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        err = allocate_budget(c, page, ui, appending);
        if (unlikely(err)) {
                ubifs_assert(err == -ENOSPC);
                /*
                 * If we skipped reading the page because we were going to
                 * write all of it, then it is not up to date.
                 */
                if (skipped_read) {
                        ClearPageChecked(page);
                        ClearPageUptodate(page);
                }
                /*
                 * Budgeting failed which means it would have to force
                 * write-back but didn't, because we set the @fast flag in the
                 * request. Write-back cannot be done now, while we have the
                 * page locked, because it would deadlock. Unlock and free
                 * everything and fall back to the slow path.
                 */
                if (appending) {
                        ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                        mutex_unlock(&ui->ui_mutex);
                }
                unlock_page(page);
                page_cache_release(page);

                return write_begin_slow(mapping, pos, len, pagep, flags);
        }

        /*
         * Whee, we acquired budgeting quickly - without involving
         * garbage-collection, committing or forcing write-back. We return
         * with @ui->ui_mutex locked if we are appending pages, and release it
         * otherwise. This is an optimization (slightly hacky though).
         */
        *pagep = page;
        return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_end()' which cancels the budget
 * allocated by 'ubifs_write_begin()'. It also unlocks @ui->ui_mutex in case
 * of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
                          struct ubifs_inode *ui, int appending)
{
        if (appending) {
                if (!ui->dirty)
                        ubifs_release_dirty_inode_budget(c, ui);
                mutex_unlock(&ui->ui_mutex);
        }
        if (!PagePrivate(page)) {
                if (PageChecked(page))
                        release_new_page_budget(c);
                else
                        release_existing_page_budget(c);
        }
}

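/*
 * ubifs_write_end - '->write_end()' counterpart of 'ubifs_write_begin()'. It
 * marks the page dirty (tracking it via @PG_private), updates @i_size when
 * appending, and releases @ui->ui_mutex taken by 'ubifs_write_begin()'.
 */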
static int ubifs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t end_pos = pos + len;
        int appending = !!(end_pos > inode->i_size);

        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, page->index, len, copied, inode->i_size);

        if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
                /*
                 * VFS copied less data to the page than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the page was not up-to-date and @len was
                 * @PAGE_CACHE_SIZE, 'ubifs_write_begin()' skipped reading the
                 * page from the media (for optimization reasons), which means
                 * part of the page contains garbage. So read the page now.
                 */
                dbg_gen("copied %d instead of %d, read page and repeat",
                        copied, len);
                cancel_budget(c, page, ui, appending);
                ClearPageChecked(page);

                /*
                 * Return 0 to force VFS to repeat the whole operation, or the
                 * error code if 'do_readpage()' failed.
                 */
                copied = do_readpage(page);
                goto out;
        }

        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (appending) {
                i_size_write(inode, end_pos);
                ui->ui_size = end_pos;
                /*
                 * Note, we do not set @I_DIRTY_PAGES (which means that the
                 * inode has dirty pages), this has been done in
                 * '__set_page_dirty_nobuffers()'.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
                ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                mutex_unlock(&ui->ui_mutex);
        }

out:
        unlock_page(page);
        page_cache_release(page);
        return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);

        addr = zaddr = kmap(page);

        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (!i_size || page->index > end_index) {
                hole = 1;
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out_hole;
        }

        page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt) {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;
                        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
                        nn += 1;
                        continue;
                } else {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
        }

        if (end_index == page->index) {
                int len = i_size & (PAGE_CACHE_SIZE - 1);

                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole) {
                SetPageChecked(page);
                dbg_gen("hole");
        }

        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        *n = nn;
        return 0;

out_err:
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        ubifs_err("bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                              struct page *page1)
{
        pgoff_t offset = page1->index, end_index;
        struct address_space *mapping = page1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        int err, page_idx, page_cnt, ret = 0, n = 0;
        int allocate = bu->buf ? 0 : 1;
        loff_t isize;

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and
                 * the blocks for the first page we are looking for are not
                 * together. If all the pages were like this, bulk-read would
                 * become slower than normal read, so switch off bulk-read.
                 */
                goto out_bu_off;
        }

        if (bu->cnt) {
                if (allocate) {
                        /*
                         * Allocate bulk-read buffer depending on how many
                         * data nodes are going to be read.
                         */
                        bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
                                      bu->zbranch[bu->cnt - 1].len -
                                      bu->zbranch[0].offs;
                        ubifs_assert(bu->buf_len > 0);
                        ubifs_assert(bu->buf_len <= c->leb_size);
                        bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
                        if (!bu->buf)
                                goto out_bu_off;
                }

                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, page1, bu, &n);
        if (err)
                goto out_warn;

        unlock_page(page1);
        ret = 1;

        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct page *page;

                if (page_offset > end_index)
                        break;
                page = find_or_create_page(mapping, page_offset,
                                           GFP_NOFS | __GFP_COLD);
                if (!page)
                        break;
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
                page_cache_release(page);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        if (allocate)
                kfree(bu->buf);
        return ret;

out_warn:
        ubifs_warn("ignoring error %d and skipping bulk-read", err);
        goto out_free;

out_bu_off:
        ui->read_in_a_row = ui->bulk_read = 0;
        goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = page->index, last_page_read = ui->last_page_read;
        struct bu_info *bu;
        int err = 0, allocated = 0;

        ui->last_page_read = index;
        if (!c->bulk_read)
                return 0;

        /*
         * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
         * so don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;

        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }

        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }

        /*
         * If possible, try to use pre-allocated bulk-read information, which
         * is protected by @c->bu_mutex.
         */
        if (mutex_trylock(&c->bu_mutex))
                bu = &c->bu;
        else {
                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
                if (!bu)
                        goto out_unlock;

                bu->buf = NULL;
                allocated = 1;
        }

        bu->buf_len = c->max_bu_buf_len;
        data_key_init(c, &bu->key, inode->i_ino,
                      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
        err = ubifs_do_bulk_read(c, bu, page);

        if (!allocated)
                mutex_unlock(&c->bu_mutex);
        else
                kfree(bu);

out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return err;
}

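/*
 * ubifs_readpage - '->readpage()' address-space operation. Try bulk-read
 * first and fall back to reading the single page if bulk-read was not done.
 * This function always returns zero; read errors leave the page !Uptodate,
 * which the VFS treats as an I/O error.
 */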
static int ubifs_readpage(struct file *file, struct page *page)
{
        if (ubifs_bulk_read(page))
                return 0;
        do_readpage(page);
        unlock_page(page);
        return 0;
}

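/*
 * do_writepage - write a page to the journal block-by-block with
 * 'ubifs_jnl_write_data()' and release the page budget. On failure the
 * file-system is switched to read-only mode, because it would be in an
 * inconsistent state otherwise.
 */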
static int do_writepage(struct page *page, int len)
{
        int err = 0, i, blen;
        unsigned int block;
        void *addr;
        union ubifs_key key;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
        struct ubifs_inode *ui = ubifs_inode(inode);
        spin_lock(&ui->ui_lock);
        ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
        spin_unlock(&ui->ui_lock);
#endif

        /* Update radix tree tags */
        set_page_writeback(page);

        addr = kmap(page);
        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        i = 0;
        while (len) {
                blen = min_t(int, len, UBIFS_BLOCK_SIZE);
                data_key_init(c, &key, inode->i_ino, block);
                err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
                if (err)
                        break;
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += blen;
                len -= blen;
        }
        if (err) {
                SetPageError(page);
                ubifs_err("cannot write page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                ubifs_ro_mode(c, err);
        }

        ubifs_assert(PagePrivate(page));
        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);

        kunmap(page);
        unlock_page(page);
        end_page_writeback(page);
        return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 *
 * A note on locking: 'ubifs_writepage()' is called with the page locked but
 * without @i_mutex, which means other VFS operations, most notably truncation
 * to a smaller size, may run on this inode at the same time.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size = i_size_read(inode), synced_i_size;
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        int err, len = i_size & (PAGE_CACHE_SIZE - 1);
        void *kaddr;

        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
                inode->i_ino, page->index, page->flags);
        ubifs_assert(PagePrivate(page));

        /* Is the page fully outside @i_size? (truncate in progress) */
        if (page->index > end_index || (page->index == end_index && !len)) {
                err = 0;
                goto out_unlock;
        }

        spin_lock(&ui->ui_lock);
        synced_i_size = ui->synced_i_size;
        spin_unlock(&ui->ui_lock);

        /* Is the page fully inside @i_size? */
        if (page->index < end_index) {
                if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
                                goto out_unlock;
                        /*
                         * The inode has been written, but the write-buffer
                         * has not yet been synchronized, so in case of an
                         * unclean reboot we may end up with some pages beyond
                         * the inode size, but they would be in the journal
                         * (because the commit flushes write buffers) and
                         * recovery would deal with this.
                         */
                }
                return do_writepage(page, PAGE_CACHE_SIZE);
        }

        /*
         * The page straddles @i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        kaddr = kmap_atomic(page);
        memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out_unlock;
        }

        return do_writepage(page, len);

out_unlock:
        unlock_page(page);
        return err;
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = timespec_trunc(attr->ia_atime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = timespec_trunc(attr->ia_mtime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = timespec_trunc(attr->ia_ctime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
        }
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
                         const struct iattr *attr)
{
        int err;
        struct ubifs_budget_req req;
        loff_t old_size = inode->i_size, new_size = attr->ia_size;
        int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);

        dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
        memset(&req, 0, sizeof(struct ubifs_budget_req));

        /*
         * If this is truncation to a smaller size which does not fall on a
         * block boundary, the last data block has to be re-written, so budget
         * for one changed page.
         */
        if (new_size & (UBIFS_BLOCK_SIZE - 1))
                req.dirtied_page = 1;
        /* A funny way to budget for truncation node */
        req.dirtied_ino = 1;
        req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
        err = ubifs_budget_space(c, &req);
        if (err) {
                /*
                 * Treat truncations to zero as deletion and always allow
                 * them, just like we do for '->unlink()'.
                 */
                if (new_size || err != -ENOSPC)
                        return err;
                budgeted = 0;
        }

        truncate_setsize(inode, new_size);

        if (offset) {
                pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
                struct page *page;

                page = find_lock_page(inode->i_mapping, index);
                if (page) {
                        if (PageDirty(page)) {
                                /*
                                 * 'ubifs_jnl_truncate()' will try to truncate
                                 * the last data node, but it contains
                                 * out-of-date data because the page is dirty.
                                 * Write the page now, so that
                                 * 'ubifs_jnl_truncate()' will see an already
                                 * truncated (and up to date) data node.
                                 */
                                ubifs_assert(PagePrivate(page));

                                clear_page_dirty_for_io(page);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = new_size &
                                                 (PAGE_CACHE_SIZE - 1);
                                err = do_writepage(page, offset);
                                page_cache_release(page);
                                if (err)
                                        goto out_budg;
                                /*
                                 * We could now tell 'ubifs_jnl_truncate()'
                                 * not to read the last block.
                                 */
                        } else {
                                /*
                                 * We could 'kmap()' the page and pass the
                                 * data to 'ubifs_jnl_truncate()' to save it
                                 * from having to read it.
                                 */
                                unlock_page(page);
                                page_cache_release(page);
                        }
                }
        }

        mutex_lock(&ui->ui_mutex);
        ui->ui_size = inode->i_size;
        /* Truncation changes inode [mc]time */
        inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
        /* Other attributes may be changed at the same time as well */
        do_attr_changes(inode, attr);
        err = ubifs_jnl_truncate(c, inode, old_size, new_size);
        mutex_unlock(&ui->ui_mutex);

out_budg:
        if (budgeted)
                ubifs_release_budget(c, &req);
        else {
                c->bi.nospace = c->bi.nospace_rp = 0;
                smp_wmb();
        }
        return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
                      const struct iattr *attr)
{
        int err, release;
        loff_t new_size = attr->ia_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        if (attr->ia_valid & ATTR_SIZE) {
                dbg_gen("size %lld -> %lld", inode->i_size, new_size);
                truncate_setsize(inode, new_size);
        }

        mutex_lock(&ui->ui_mutex);
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncation changes inode [mc]time */
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                /* 'truncate_setsize()' changed @i_size, update @ui_size */
                ui->ui_size = inode->i_size;
        }

        do_attr_changes(inode, attr);

        release = ui->dirty;
        if (attr->ia_valid & ATTR_SIZE)
                /*
                 * Inode length changed, so we have to make sure
                 * @I_DIRTY_DATASYNC is set.
                 */
                __mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        else
                mark_inode_dirty_sync(inode);
        mutex_unlock(&ui->ui_mutex);

        if (release)
                ubifs_release_budget(c, &req);
        if (IS_SYNC(inode))
                err = inode->i_sb->s_op->write_inode(inode, NULL);
        return err;
}

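/*
 * ubifs_setattr - '->setattr()' inode operation. Truncation to a smaller size
 * is handled separately in 'do_truncation()' because it has different
 * budgeting requirements; everything else goes through 'do_setattr()'.
 */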
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
        int err;
        struct inode *inode = dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        dbg_gen("ino %lu, mode %#x, ia_valid %#x",
                inode->i_ino, inode->i_mode, attr->ia_valid);
        err = inode_change_ok(inode, attr);
        if (err)
                return err;

        err = dbg_check_synced_i_size(c, inode);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
                /* Truncation to a smaller size */
                err = do_truncation(c, inode, attr);
        else
                err = do_setattr(c, inode, attr);

        return err;
}

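/*
 * ubifs_invalidatepage - release the budget of an invalidated dirty page.
 * UBIFS only handles full-page invalidation; a partially invalidated page
 * remains dirty and keeps its budget.
 */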
static void ubifs_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ubifs_assert(PagePrivate(page));
        if (offset || length < PAGE_CACHE_SIZE)
                /* Partial page remains dirty */
                return;

        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);
}

static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);

        nd_set_link(nd, ui->data);
        return NULL;
}

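/*
 * ubifs_fsync - '->fsync()' file operation. Write back dirty pages, then the
 * inode itself (unless this is a data-only sync and only data changed), and
 * finally flush the write-buffers which may hold nodes belonging to this
 * inode.
 */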
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err;

        dbg_gen("syncing inode %lu", inode->i_ino);

        if (c->ro_mount)
                /*
                 * For some really strange reasons VFS does not filter out
                 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
                 */
                return 0;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;
        mutex_lock(&inode->i_mutex);

        /* Synchronize the inode unless this is a 'datasync()' call. */
        if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out;
        }

        /*
         * Nodes related to this inode may still sit in a write-buffer. Flush
         * them.
         */
        err = ubifs_sync_wbufs_by_inode(c, inode);
out:
        mutex_unlock(&inode->i_mutex);
        return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec *now)
{
        if (!timespec_equal(&inode->i_mtime, now) ||
            !timespec_equal(&inode->i_ctime, now))
                return 1;
        return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates the mtime and ctime of the inode if they are not
 * equivalent to current time. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int update_mctime(struct inode *inode)
{
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        if (mctime_update_needed(inode, &now)) {
                int err, release;
                struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

                err = ubifs_budget_space(c, &req);
                if (err)
                        return err;

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_budget(c, &req);
        }

        return 0;
}

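/*
 * ubifs_write_iter - '->write_iter()' file operation. Budget for and apply
 * the [mc]time update before handing the actual write over to
 * 'generic_file_write_iter()'.
 */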
static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        int err = update_mctime(file_inode(iocb->ki_filp));

        if (err)
                return err;

        return generic_file_write_iter(iocb, from);
}

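/*
 * ubifs_set_page_dirty - '->set_page_dirty()' address-space operation. UBIFS
 * dirties pages only via 'ubifs_write_end()' and 'ubifs_vm_page_mkwrite()',
 * where budgeting is done; a call which actually dirtied a clean page here
 * would mean an unbudgeted page, hence the assertion below.
 */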
static int ubifs_set_page_dirty(struct page *page)
{
        int ret;

        ret = __set_page_dirty_nobuffers(page);
        /*
         * An attempt to dirty a page without budgeting for it - should not
         * happen.
         */
        ubifs_assert(ret == 0);
        return ret;
}

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
        /*
         * An attempt to release a dirty page without budgeting for it -
         * should not happen.
         */
        if (PageWriteback(page))
                return 0;
        ubifs_assert(PagePrivate(page));
        ubifs_assert(0);
        ClearPagePrivate(page);
        ClearPageChecked(page);
        return 1;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, update_time;

        dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
                i_size_read(inode));
        ubifs_assert(!c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return VM_FAULT_SIGBUS; /* -EROFS */

        /*
         * We have not locked @page so far so we may budget for changing the
         * page. Note, we cannot do this after we locked the page, because
         * budgeting may cause write-back which would cause deadlock.
         *
         * At the moment we do not know whether the page is dirty or not, so
         * we assume that it is not and budget for a new page. We could look
         * at the @PG_private flag and figure this out, but we may race with
         * write-back and the page state may change by the time we lock it,
         * so this would need additional care. We do not bother with this at
         * the moment, because it is just an optimization, not a "bug".
         *
         * The budgeting-related logic of this function is similar to what we
         * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
         * for more comments.
         */
        update_time = mctime_update_needed(inode, &now);
        if (update_time)
                /*
                 * We have to change inode time stamp which requires extra
                 * budgeting.
                 */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
                        ubifs_warn("out of space for mmapped file (inode number %lu)",
                                   inode->i_ino);
                return VM_FAULT_SIGBUS;
        }

        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                     page_offset(page) > i_size_read(inode))) {
                /* Page got truncated out from underneath us */
                err = -EINVAL;
                goto out_unlock;
        }

        if (PagePrivate(page))
                release_new_page_budget(c);
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (update_time) {
                int release;
                struct ubifs_inode *ui = ubifs_inode(inode);

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;

out_unlock:
        unlock_page(page);
        ubifs_release_budget(c, &req);
        if (err)
                err = VM_FAULT_SIGBUS;
        return err;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
        .fault        = filemap_fault,
        .map_pages    = filemap_map_pages,
        .page_mkwrite = ubifs_vm_page_mkwrite,
        .remap_pages  = generic_file_remap_pages,
};

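/*
 * ubifs_file_mmap - '->mmap()' file operation. Use the generic mapping code
 * with our own @vm_ops so that 'ubifs_vm_page_mkwrite()' can budget pages
 * dirtied through the mapping.
 */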
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int err;

        err = generic_file_mmap(file, vma);
        if (err)
                return err;
        vma->vm_ops = &ubifs_file_vm_ops;
        return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
        .write_begin    = ubifs_write_begin,
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
        .releasepage    = ubifs_releasepage,
};

const struct inode_operations ubifs_file_inode_operations = {
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
        .setxattr    = ubifs_setxattr,
        .getxattr    = ubifs_getxattr,
        .listxattr   = ubifs_listxattr,
        .removexattr = ubifs_removexattr,
};

const struct inode_operations ubifs_symlink_inode_operations = {
        .readlink    = generic_readlink,
        .follow_link = ubifs_follow_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
};

const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ubifs_compat_ioctl,
#endif
};