1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include "ubifs.h"
53#include <linux/aio.h>
54#include <linux/mount.h>
55#include <linux/namei.h>
56#include <linux/slab.h>
57
/*
 * read_block - read a single data block of an inode.
 * @inode: inode the block belongs to
 * @addr: destination buffer (at least %UBIFS_BLOCK_SIZE bytes)
 * @block: data block number within the inode
 * @dn: scratch buffer for the on-flash data node
 *
 * Looks the block up in the TNC, decompresses it into @addr and zero-pads
 * the tail up to %UBIFS_BLOCK_SIZE.  Returns zero on success, %-ENOENT if
 * the block is a hole (in which case @addr is zeroed), or another negative
 * error code on failure.
 */
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found in the index - this block is a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	/* Data nodes must be newer than the inode's creation */
	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data).  Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err("bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn);
	return -EINVAL;
}
104
/*
 * do_readpage - read a full page of data into @page.
 *
 * Reads %UBIFS_BLOCKS_PER_PAGE blocks, zero-filling blocks which are
 * holes or lie beyond @i_size, and marks the page up-to-date.  Pages
 * consisting only of holes get the PG_checked flag set (used later by
 * the budgeting code).  Returns zero on success or a negative error code.
 */
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond @i_size - the whole page is a hole */
		SetPageChecked(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Block past @i_size - treat it as a hole */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				/* Zero the tail of the last block past @i_size */
				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Only holes were encountered - not an error */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
192
193
194
195
196
197
198
199
200static void release_new_page_budget(struct ubifs_info *c)
201{
202 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
203
204 ubifs_release_budget(c, &req);
205}
206
207
208
209
210
211
212
213
214static void release_existing_page_budget(struct ubifs_info *c)
215{
216 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
217
218 ubifs_release_budget(c, &req);
219}
220
/*
 * write_begin_slow - slow path of 'ubifs_write_begin()'.
 * @mapping: address space the page belongs to
 * @pos: file position where the write starts
 * @len: number of bytes to be written
 * @pagep: the locked page is returned here on success
 * @flags: AOP flags
 *
 * Budgets for the page *before* locking it, because budgeting may force
 * write-back, which cannot be done with the page locked.  When appending,
 * @ui->ui_mutex is taken and left locked - 'ubifs_write_end()' unlocks it.
 * Returns zero on success and a negative error code on failure.
 */
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * The page is not locked yet, so we do not know whether it is dirty
	 * (already budgeted) or whether it exists on the media.  Budget for
	 * the worst case - a new page - and release the surplus below once
	 * the page state is known.
	 */
	if (appending)
		/* The write extends the file, so the inode becomes dirty too */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
			/* Whole page is overwritten - no need to read it */
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted before -
		 * give back the "new page" budget we took above.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * The page is clean but was read from the media, so it
		 * exists on flash - convert our "new page" budget into a
		 * cheaper "changed page" budget.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' updates @i_size under @ui_mutex when
		 * appending, so take it here and leave it locked.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is already dirty, hence its dirtying was
			 * budgeted before - release our duplicate budget.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/*
 * allocate_budget - fast-path budgeting for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page being written to
 * @ui: UBIFS inode the page belongs to
 * @appending: non-zero if the write extends the file
 *
 * Uses the %fast budgeting flag, so 'ubifs_budget_space()' fails with
 * %-ENOSPC instead of forcing write-back (impossible here - the page is
 * locked).  NOTE: when @appending, @ui->ui_mutex is taken and left locked;
 * the caller is responsible for unlocking it.
 *
 * Returns zero on success and %-ENOSPC on failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/* Page is dirty (budgeted) and we do not append - done */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * Page and inode are both dirty and hence already
			 * budgeted.  Keep @ui_mutex locked - we are appending.
			 */
			return 0;

		/* Only the inode dirtying still has to be budgeted */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page does not exist on the media (hole or
			 * beyond @i_size) - budget for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * The page exists on the media and will be changed,
			 * not added - budget for a changed page.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/* The inode will become dirty as well */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
/*
 * ubifs_write_begin - '->write_begin()' for UBIFS.
 *
 * Tries the fast budgeting path first ('allocate_budget()' with the %fast
 * flag); if that fails with %-ENOSPC, undoes all state and retries via
 * 'write_begin_slow()', which may force write-back.  On success *@pagep
 * holds the locked page, and when appending @ui->ui_mutex is held (it is
 * released by 'ubifs_write_end()').
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
			/*
			 * The whole page is changed, so there is no need to
			 * load it.  We do not know whether it exists on the
			 * media, so assume it does not (the larger budget);
			 * mark the page checked to record that assumption.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, it is not really up-to-date - undo that.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Fast budgeting failed: write-back would be needed, and it
		 * cannot be done with the page locked.  Unlock everything and
		 * fall back to the slow path, which budgets before locking.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		page_cache_release(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Budget was acquired quickly - without forcing write-back,
	 * garbage-collection or commit.
	 */
	*pagep = page;
	return 0;

}
512
513
514
515
516
517
518
519
520
521
522
/*
 * cancel_budget - cancel the budget taken by 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page the budget was allocated for
 * @ui: UBIFS inode object
 * @appending: non-zero if the write was going to extend the file
 *
 * Used when 'ubifs_write_end()' copied fewer bytes than budgeted for.
 * Also unlocks @ui->ui_mutex, which 'ubifs_write_begin()' left locked
 * in the appending case.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			/* The inode dirtying was budgeted - give it back */
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
538
/*
 * ubifs_write_end - '->write_end()' for UBIFS.
 *
 * Marks the page dirty (accounting it in @c->dirty_pg_cnt), updates
 * @i_size when appending, and releases @ui->ui_mutex which the matching
 * 'ubifs_write_begin()' left locked in the appending case.
 */
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
		/*
		 * 'ubifs_write_begin()' skipped reading the page because the
		 * whole page was supposed to be written, but fewer bytes
		 * actually arrived, so parts of the page are stale.  Cancel
		 * the budget, read the page in, and let the VFS repeat the
		 * copy with an up-to-date page.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force the VFS to repeat the whole operation
		 * (do_readpage() returns 0 on success).
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		/* First time this page becomes dirty - account for it */
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * @i_size changed, so make sure the inode is marked with
		 * @I_DIRTY_DATASYNC so that 'fdatasync()' writes it out.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	page_cache_release(page);
	return copied;
}
599
600
601
602
603
604
605
606
607
608
/*
 * populate_page - fill a page from the bulk-read buffer.
 * @c: UBIFS file-system description object
 * @page: page to fill
 * @bu: bulk-read information (data nodes already read into @bu->buf)
 * @n: in/out index of the next zbranch in @bu to consume
 *
 * Decompresses consecutive data nodes from @bu into @page, zero-filling
 * holes, and marks the page up-to-date.  Returns zero on success or
 * %-EINVAL if a bad data node is encountered.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (!i_size || page->index > end_index) {
		/* Page is entirely beyond @i_size - it is all a hole */
		hole = 1;
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			/* Ran out of data nodes - the rest is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			/* Data nodes must be newer than the inode */
			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			/* Zero-pad a partially filled block */
			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			/* Track how far into the page real data extends */
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			/* Old node for an earlier block - skip it */
			nn += 1;
			continue;
		} else {
			/* Next node is for a later block - this one is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		/* Last page of the file - zero the tail beyond @i_size */
		int len = i_size & (PAGE_CACHE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err("bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
704
705
706
707
708
709
710
711
712
/*
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read (already locked by the caller)
 *
 * Reads several consecutive pages worth of data nodes in one go and
 * populates the corresponding page cache pages.  Returns non-zero if the
 * bulk-read was performed (and @page1 unlocked), zero if not - errors are
 * deliberately swallowed because bulk-read is only an optimization.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * Not even one full page of data was found - bulk-read is
		 * not worthwhile, switch it off for this inode.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate a buffer sized exactly for the span of
			 * data nodes found (from the first to the last one).
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		page_cache_release(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn("ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
811
812
813
814
815
816
817
818
819
820
/*
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Switches bulk-read on after three sequential page reads in a row and
 * off again when sequentiality is broken.  Returns non-zero if the page
 * was read and unlocked via bulk-read, zero otherwise (the caller then
 * falls back to a regular single-page read).
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read state is protected by @ui->ui_mutex, but bulk-read is
	 * only an optimization - do not bother if the mutex is contended.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* The sequential access pattern is broken - reset */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three sequential reads - switch bulk-read on */
		ui->bulk_read = 1;
	}

	/*
	 * Prefer the pre-allocated bulk-read descriptor @c->bu (protected by
	 * @c->bu_mutex); fall back to a temporary allocation if it is busy.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
886
/*
 * ubifs_readpage - '->readpage()' for UBIFS.
 *
 * Attempts bulk-read first; if that does not take the page, falls back
 * to a single-page read.  Always returns zero.
 */
static int ubifs_readpage(struct file *file, struct page *page)
{
	if (!ubifs_bulk_read(page)) {
		do_readpage(page);
		unlock_page(page);
	}
	return 0;
}
895
896static int do_writepage(struct page *page, int len)
897{
898 int err = 0, i, blen;
899 unsigned int block;
900 void *addr;
901 union ubifs_key key;
902 struct inode *inode = page->mapping->host;
903 struct ubifs_info *c = inode->i_sb->s_fs_info;
904
905#ifdef UBIFS_DEBUG
906 spin_lock(&ui->ui_lock);
907 ubifs_assert(page->index <= ui->synced_i_size << PAGE_CACHE_SIZE);
908 spin_unlock(&ui->ui_lock);
909#endif
910
911
912 set_page_writeback(page);
913
914 addr = kmap(page);
915 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
916 i = 0;
917 while (len) {
918 blen = min_t(int, len, UBIFS_BLOCK_SIZE);
919 data_key_init(c, &key, inode->i_ino, block);
920 err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
921 if (err)
922 break;
923 if (++i >= UBIFS_BLOCKS_PER_PAGE)
924 break;
925 block += 1;
926 addr += blen;
927 len -= blen;
928 }
929 if (err) {
930 SetPageError(page);
931 ubifs_err("cannot write page %lu of inode %lu, error %d",
932 page->index, inode->i_ino, err);
933 ubifs_ro_mode(c, err);
934 }
935
936 ubifs_assert(PagePrivate(page));
937 if (PageChecked(page))
938 release_new_page_budget(c);
939 else
940 release_existing_page_budget(c);
941
942 atomic_long_dec(&c->dirty_pg_cnt);
943 ClearPagePrivate(page);
944 ClearPageChecked(page);
945
946 kunmap(page);
947 unlock_page(page);
948 end_page_writeback(page);
949 return err;
950}
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
/*
 * ubifs_writepage - '->writepage()' for UBIFS.
 *
 * Writes a dirty page to the journal.  Before writing a page that lies at
 * or beyond the last synchronized inode size, the inode itself is written
 * first, so that after an unclean reboot the data cannot refer to a size
 * the inode never recorded on flash.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
			/* Sync the inode first - see function comment */
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * @ui->synced_i_size is not updated here; the inode
			 * write-back path takes care of it.
			 */
		}
		return do_writepage(page, PAGE_CACHE_SIZE);
	}

	/*
	 * The page straddles @i_size.  It must be zeroed out from @i_size to
	 * the page end, because the kernel does not guarantee that area is
	 * zero and it may be exposed to mmap.
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
1063
1064
1065
1066
1067
1068
1069static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1070{
1071 if (attr->ia_valid & ATTR_UID)
1072 inode->i_uid = attr->ia_uid;
1073 if (attr->ia_valid & ATTR_GID)
1074 inode->i_gid = attr->ia_gid;
1075 if (attr->ia_valid & ATTR_ATIME)
1076 inode->i_atime = timespec_trunc(attr->ia_atime,
1077 inode->i_sb->s_time_gran);
1078 if (attr->ia_valid & ATTR_MTIME)
1079 inode->i_mtime = timespec_trunc(attr->ia_mtime,
1080 inode->i_sb->s_time_gran);
1081 if (attr->ia_valid & ATTR_CTIME)
1082 inode->i_ctime = timespec_trunc(attr->ia_ctime,
1083 inode->i_sb->s_time_gran);
1084 if (attr->ia_valid & ATTR_MODE) {
1085 umode_t mode = attr->ia_mode;
1086
1087 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1088 mode &= ~S_ISGID;
1089 inode->i_mode = mode;
1090 }
1091}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
/*
 * do_truncation - truncate an inode to a smaller size.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * Implements 'setattr()' truncations ('ia_size' < 'i_size').  Returns
 * zero on success and a negative error code on failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If truncation cuts into the middle of a block, the last block has
	 * to be re-written - budget for a changed page.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;
	/* Truncation always dirties the inode and writes a truncation node */
	req.dirtied_ino = 1;
	/* Budget for the truncation node via the inode-data field */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncation to zero like deletion: allow it to proceed
		 * without budget when there is no free space, because it
		 * ultimately frees space.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * The last page is dirty - write out only the
				 * part of it which survives the truncation,
				 * so no stale tail data can reappear.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_CACHE_SIZE - 1);
				err = do_writepage(page, offset);
				page_cache_release(page);
				if (err)
					goto out_budg;
				/*
				 * Note: 'do_writepage()' unlocked the page
				 * for us, so only the reference is dropped.
				 */
			} else {
				/*
				 * The last page is clean; the journal
				 * truncation below will take care of fixing
				 * up the last block on the media.
				 */
				unlock_page(page);
				page_cache_release(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* We proceeded without budget - clear the no-space flags */
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
/*
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * Implements 'setattr()' for all cases except truncation to a smaller
 * size (handled by 'do_truncation()').  Returns zero on success and a
 * negative error code on failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Size change also updates [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'truncate_setsize()' changed @i_size - update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * The inode size changed, so @I_DIRTY_DATASYNC must be set
		 * as well, so that 'fdatasync()' writes the inode out.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		/* The inode was dirty (budgeted) already - release ours */
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}
1254
1255int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
1256{
1257 int err;
1258 struct inode *inode = dentry->d_inode;
1259 struct ubifs_info *c = inode->i_sb->s_fs_info;
1260
1261 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1262 inode->i_ino, inode->i_mode, attr->ia_valid);
1263 err = inode_change_ok(inode, attr);
1264 if (err)
1265 return err;
1266
1267 err = dbg_check_synced_i_size(c, inode);
1268 if (err)
1269 return err;
1270
1271 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1272
1273 err = do_truncation(c, inode, attr);
1274 else
1275 err = do_setattr(c, inode, attr);
1276
1277 return err;
1278}
1279
1280static void ubifs_invalidatepage(struct page *page, unsigned int offset,
1281 unsigned int length)
1282{
1283 struct inode *inode = page->mapping->host;
1284 struct ubifs_info *c = inode->i_sb->s_fs_info;
1285
1286 ubifs_assert(PagePrivate(page));
1287 if (offset || length < PAGE_CACHE_SIZE)
1288
1289 return;
1290
1291 if (PageChecked(page))
1292 release_new_page_budget(c);
1293 else
1294 release_existing_page_budget(c);
1295
1296 atomic_long_dec(&c->dirty_pg_cnt);
1297 ClearPagePrivate(page);
1298 ClearPageChecked(page);
1299}
1300
1301static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
1302{
1303 struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);
1304
1305 nd_set_link(nd, ui->data);
1306 return NULL;
1307}
1308
/*
 * ubifs_fsync - '->fsync()' for UBIFS.
 *
 * Writes back the data range, then the inode (unless this is a pure
 * 'fdatasync()' with no size/data-critical inode changes), and finally
 * flushes any write-buffers still holding nodes of this inode.
 */
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * The VFS does not always filter out 'fsync()' on R/O
		 * mounted file-systems; there is nothing to synchronize.
		 */
		return 0;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&inode->i_mutex);

	/* Synchronize the inode unless this is a 'datasync()' call */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer.
	 * Push them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355static inline int mctime_update_needed(const struct inode *inode,
1356 const struct timespec *now)
1357{
1358 if (!timespec_equal(&inode->i_mtime, now) ||
1359 !timespec_equal(&inode->i_ctime, now))
1360 return 1;
1361 return 0;
1362}
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
/*
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * Updates the timestamps if they differ from the current time, budgeting
 * for the inode dirtying first.  Returns zero on success and a negative
 * error code on failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			/* The inode was already dirty (budgeted before) */
			ubifs_release_budget(c, &req);
	}

	return 0;
}
1398
1399static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
1400 unsigned long nr_segs, loff_t pos)
1401{
1402 int err;
1403 struct inode *inode = iocb->ki_filp->f_mapping->host;
1404 struct ubifs_info *c = inode->i_sb->s_fs_info;
1405
1406 err = update_mctime(c, inode);
1407 if (err)
1408 return err;
1409
1410 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1411}
1412
/*
 * ubifs_set_page_dirty - '->set_page_dirty()' for UBIFS.
 */
static int ubifs_set_page_dirty(struct page *page)
{
	int ret;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page which was not prepared (budgeted for)
	 * via '->write_begin()' or 'page_mkwrite()' - should not happen.
	 */
	ubifs_assert(ret == 0);
	return ret;
}
1425
/*
 * ubifs_releasepage - '->releasepage()' for UBIFS.
 *
 * Called for pages with PG_private set.  In UBIFS PG_private means the
 * page is dirty and budgeted, so being asked to release it should not
 * happen (hence the assertions); the state is cleared defensively anyway.
 */
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/* Pages under write-back cannot be released */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}
1440
1441
1442
1443
1444
/*
 * ubifs_vm_page_mkwrite - budget for a page about to be dirtied via mmap.
 *
 * mmap'ed writes bypass '->write_begin()', so budgeting must happen here,
 * before the page becomes writable.  Returns zero on success and
 * %VM_FAULT_SIGBUS on failure.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * The page is not locked yet, so we do not know whether it is dirty
	 * (already budgeted) or exists on the media - budget for the worst
	 * case (a new page) and release the surplus after locking it below.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * The [mc]time needs updating too, which dirties the inode -
		 * budget for that as well.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn("out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* The page got truncated or invalidated while we waited */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		/* The page is dirty already - release the surplus budget */
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			/* Page exists on flash - use the cheaper budget */
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			/* The inode was already dirty (budgeted before) */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	unlock_page(page);
	return 0;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}
1538
/* VM operations for mmap'ed UBIFS files */
static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = ubifs_vm_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
1544
1545static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1546{
1547 int err;
1548
1549 err = generic_file_mmap(file, vma);
1550 if (err)
1551 return err;
1552 vma->vm_ops = &ubifs_file_vm_ops;
1553 return 0;
1554}
1555
/* Address-space (page cache) operations for regular UBIFS files */
const struct address_space_operations ubifs_file_address_operations = {
	.readpage = ubifs_readpage,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
	.releasepage = ubifs_releasepage,
};
1565
/* Inode operations for regular UBIFS files */
const struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
	.setxattr = ubifs_setxattr,
	.getxattr = ubifs_getxattr,
	.listxattr = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
};
1574
/* Inode operations for UBIFS symbolic links */
const struct inode_operations ubifs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = ubifs_follow_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
};
1581
/* File operations for regular UBIFS files */
const struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = ubifs_aio_write,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};
1597