1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include "ubifs.h"
53#include <linux/mount.h>
54#include <linux/namei.h>
55
/*
 * read_block - read one UBIFS data block of an inode into @addr.
 *
 * Looks the block up in the TNC and decompresses it. Returns zero on success,
 * %-ENOENT if the block is a hole (in which case @addr is zeroed), and other
 * negative error codes on failure. @dn is a caller-supplied scratch buffer of
 * at least %UBIFS_MAX_DATA_NODE_SZ bytes.
 */
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	/* A data node must have been written after the inode was created */
	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	/* Compressed payload length = node length minus the header */
	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block even for blocks that are
	 * not the last in the file (e.g. as a result of making a hole and
	 * appending data). Zero-fill the remainder of the block.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err("bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	dbg_dump_node(c, dn);
	return -EINVAL;
}
102
/*
 * do_readpage - read and fill one page of an inode.
 *
 * Reads all UBIFS blocks covering @page (the page must be locked and mapped
 * by the caller's address space). Holes are zero-filled and the page is
 * marked with %PG_checked so later budgeting knows it does not exist on the
 * media. Returns zero on success (the page is made up-to-date even for pure
 * holes) and a negative error code on read failure.
 */
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	/* First block of this page and first block beyond file size */
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode - whole page is a hole */
		SetPageChecked(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				/*
				 * Last block of the file: zero anything the
				 * data node contains past @i_size.
				 */
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
190
191
192
193
194
195
196
197
/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * Helper which releases the budget corresponding to one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}
204
205
206
207
208
209
210
211
/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * Helper which releases the budget corresponding to changing one page of data
 * which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->page_budget};

	ubifs_release_budget(c, &req);
}
218
/*
 * write_begin_slow - slow path of 'ubifs_write_begin()'.
 *
 * Unlike the fast path, this budgets *before* locking the page, so budgeting
 * is allowed to force write-back without deadlocking. On success, if we are
 * appending, @ui->ui_mutex is left locked and is released later in
 * 'ubifs_write_end()'. Returns zero on success and a negative error code on
 * failure.
 */
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At this point we do not know whether the page exists on the media
	 * or not, so assume the worst (a new page) and budget for that. If
	 * the page turns out to exist, the surplus is returned below once the
	 * page state is known.
	 */
	if (appending)
		/* We are appending, so the inode will change and needs budget */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		/* Whole-page overwrite: no need to read it from the media */
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * The page exists on the media (not a hole), so we budgeted
		 * too much - convert the "new page" budget to a "changed
		 * page" budget.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * The inode is about to be marked dirty - take @ui_mutex to
		 * serialize against write-back; it stays locked until
		 * 'ubifs_write_end()'.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is already dirty, so its change was
			 * already budgeted - release the duplicate budget.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
312
313
314
315
316
317
318
319
320
321
322
323
324
325
/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write extends the inode
 *
 * Helper for 'ubifs_write_begin()' which allocates budget with the "fast"
 * option (no forced write-back). When @appending and the budget succeeds,
 * @ui->ui_mutex is left locked for 'ubifs_write_end()' to release. Returns
 * zero on success and a negative error code on failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty (already budgeted) and we are not
			 * appending - nothing to budget at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * must be marked dirty - but it already is, so no
			 * budget is needed. Note: @ui->ui_mutex stays locked
			 * to prevent write-back from flushing the inode and
			 * freeing its budget; released in 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * Page dirty, appending, inode clean: budget only the inode
		 * change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page is a hole (does not exist on the media), so
			 * changing it grows the index - budget for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * The page exists on the media - budget only for
			 * changing it.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but appending will dirty
				 * it - budget for the inode change too.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
/*
 * ubifs_write_begin - VFS '->write_begin()' for UBIFS.
 *
 * Fast path: grab and (if needed) read the page first, then try "fast"
 * budgeting which never forces write-back. If that fails with -ENOSPC, the
 * page is released and 'write_begin_slow()' is used instead. On success, if
 * appending, @ui->ui_mutex is left locked for 'ubifs_write_end()'.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);

	if (unlikely(c->ro_media))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
			/*
			 * We change the whole page, so no need to load it. We
			 * do not know whether this page exists on the media,
			 * so assume it does not (that requires the larger
			 * budget): budgeting a bit more is cheaper than
			 * reading the page from the media.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * overwrite all of it, it is not actually up to date - undo
		 * the flags before falling back.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed, which means it would have had to force
		 * write-back but did not (the @fast flag was set). Write-back
		 * cannot be done now while we hold the page lock - it would
		 * deadlock. Unlock, release everything, and take the slow
		 * path which budgets before locking the page.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		page_cache_release(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * We acquired the budget quickly - without garbage collection,
	 * committing or forced write-back. We return with @ui->ui_mutex
	 * locked if we are appending pages, and unlocked otherwise (a
	 * slightly hacky optimization).
	 */
	*pagep = page;
	return 0;

}
507
508
509
510
511
512
513
514
515
516
517
/**
 * cancel_budget - cancel budget taken in 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page which the budget belongs to
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the write was an append
 *
 * Helper for 'ubifs_write_end()' used when the copy from user space came up
 * short and the whole operation must be retried. Also drops @ui->ui_mutex,
 * which 'ubifs_write_begin()' left locked in the appending case.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		/* Budget was only taken if the inode was not already dirty */
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		/* PG_checked marks a page which did not exist on the media */
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
533
/*
 * ubifs_write_end - VFS '->write_end()' for UBIFS.
 *
 * Completes a write started by 'ubifs_write_begin()': marks the page dirty,
 * updates the inode size on append, and releases @ui->ui_mutex which the
 * begin path left locked in the appending case.
 */
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
		/*
		 * VFS copied less data than it declared via @len in its
		 * '->write_begin()' call. Since @len was a full page,
		 * 'ubifs_write_begin()' skipped loading the page from the
		 * media, so part of it now contains garbage - cancel the
		 * budget and read the page for real.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);

		/*
		 * Return 0 so VFS repeats the whole operation, or the error
		 * code if 'do_readpage()' failed.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which would mean the
		 * inode has dirty pages) - '__set_page_dirty_nobuffers()'
		 * already did that.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	page_cache_release(page);
	return copied;
}
593
594
595
596
597
598
599
600
601
602
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to populate
 * @bu: bulk-read information (keys/offsets of pre-read data nodes)
 * @n: next zbranch slot to consume; updated on return
 *
 * Decompresses the data nodes from @bu->buf that cover @page; blocks with no
 * node are treated as holes and zero-filled. Returns zero on success and
 * %-EINVAL on a corrupted data node.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (!i_size || page->index > end_index) {
		/* The whole page lies beyond the file - pure hole */
		hole = 1;
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			/* Ran out of data nodes - rest of the page is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			/* Zero-pad a partially filled block */
			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			/* Highest byte offset within the page read so far */
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			/* Node belongs to an earlier block - skip it */
			nn += 1;
			continue;
		} else {
			/* No node for this block - it is a hole */
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		/* Last page of the file: zero everything past @i_size */
		int len = i_size & (PAGE_CACHE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err("bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
698
699
700
701
702
703
704
705
706
/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read (locked by the caller)
 *
 * Reads a run of consecutive data nodes in one go and populates @page1 plus
 * as many following page-cache pages as the data covers. Returns nonzero if
 * the bulk-read was performed (and @page1 unlocked), zero otherwise - errors
 * are deliberately swallowed because bulk-read is only an optimization.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and
		 * the blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate a bulk-read buffer sized to exactly span
			 * the data nodes that are going to be read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		page_cache_release(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn("ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
805
806
807
808
809
810
811
812
813
814
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flashes read faster if the requests are of large size. UBIFS satisfies
 * read requests page-by-page, but once it detects several sequential reads in
 * a row it switches to reading many pages at once. Returns nonzero if @page
 * was handled (and unlocked) by bulk-read, zero if the caller must read it
 * itself.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read state is protected by @ui->ui_mutex, but bulk-read is
	 * only an optimization - do not bother if the mutex is contended.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* The sequential read streak is broken - turn bulk-read off */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three sequential reads in a row - switch bulk-read on */
		ui->bulk_read = 1;
	}

	/*
	 * Prefer the pre-allocated bulk-read info (@c->bu, protected by
	 * @c->bu_mutex); fall back to a temporary allocation if it is busy.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
880
static int ubifs_readpage(struct file *file, struct page *page)
{
	/*
	 * Try bulk-read first; if it handled (and unlocked) the page we are
	 * done. Otherwise read just this one page and unlock it ourselves.
	 * Readpage never reports errors to VFS here - failed pages simply
	 * stay not up-to-date.
	 */
	if (!ubifs_bulk_read(page)) {
		do_readpage(page);
		unlock_page(page);
	}
	return 0;
}
889
890static int do_writepage(struct page *page, int len)
891{
892 int err = 0, i, blen;
893 unsigned int block;
894 void *addr;
895 union ubifs_key key;
896 struct inode *inode = page->mapping->host;
897 struct ubifs_info *c = inode->i_sb->s_fs_info;
898
899#ifdef UBIFS_DEBUG
900 spin_lock(&ui->ui_lock);
901 ubifs_assert(page->index <= ui->synced_i_size << PAGE_CACHE_SIZE);
902 spin_unlock(&ui->ui_lock);
903#endif
904
905
906 set_page_writeback(page);
907
908 addr = kmap(page);
909 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
910 i = 0;
911 while (len) {
912 blen = min_t(int, len, UBIFS_BLOCK_SIZE);
913 data_key_init(c, &key, inode->i_ino, block);
914 err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
915 if (err)
916 break;
917 if (++i >= UBIFS_BLOCKS_PER_PAGE)
918 break;
919 block += 1;
920 addr += blen;
921 len -= blen;
922 }
923 if (err) {
924 SetPageError(page);
925 ubifs_err("cannot write page %lu of inode %lu, error %d",
926 page->index, inode->i_ino, err);
927 ubifs_ro_mode(c, err);
928 }
929
930 ubifs_assert(PagePrivate(page));
931 if (PageChecked(page))
932 release_new_page_budget(c);
933 else
934 release_existing_page_budget(c);
935
936 atomic_long_dec(&c->dirty_pg_cnt);
937 ClearPagePrivate(page);
938 ClearPageChecked(page);
939
940 kunmap(page);
941 unlock_page(page);
942 end_page_writeback(page);
943 return err;
944}
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
/*
 * ubifs_writepage - VFS '->writepage()' for UBIFS.
 *
 * Synchronizes the inode first when the page lies beyond the synced size
 * (so an unclean reboot cannot leave data past the on-flash inode size),
 * zeroes the tail of a page that straddles @i_size, then writes the page
 * via 'do_writepage()'.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, 1);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer
			 * has not yet been synchronized, so in case of an
			 * unclean reboot we may end up with some pages beyond
			 * the inode size - but they would be in the journal
			 * (the commit flushes write-buffers) and recovery
			 * deals with this.
			 */
		}
		return do_writepage(page, PAGE_CACHE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped: "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, 1);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
1053
1054
1055
1056
1057
1058
/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes which attributes to change and their new values
 *
 * Applies the attribute changes selected by @attr->ia_valid to the in-core
 * inode. Note: the SGID check must run after a possible GID update above,
 * because it tests the (possibly new) @i_gid.
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		/* Drop SGID when the caller is not allowed to keep it */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
/**
 * do_truncation - truncate an inode to a smaller size.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * Implements VFS '->setattr()' for truncations which shrink the inode.
 * Returns zero on success and a negative error code on failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a partially filled block, budget for
	 * changing one data block, because the last block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for the truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncation to zero as deletion and always allow it,
		 * even when there is no space left at all.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	err = vmtruncate(inode, new_size);
	if (err)
		goto out_budg;

	if (offset) {
		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but that node contains
				 * out-of-date data because the page is dirty.
				 * Write the page now so that
				 * 'ubifs_jnl_truncate()' sees an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_CACHE_SIZE - 1);
				err = do_writepage(page, offset);
				page_cache_release(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()'
				 * not to read the last block - it is fresh.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass its data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it back.
				 */
				unlock_page(page);
				page_cache_release(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* The unbudgeted deletion freed space - clear no-space flags */
		c->nospace = c->nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * Implements VFS '->setattr()' for attribute changes which do not truncate
 * the inode to a smaller size (size may grow here). Returns zero on success
 * and a negative error code on failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		err = vmtruncate(inode, new_size);
		if (err)
			goto out;
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'vmtruncate()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * The inode length changed, so @I_DIRTY_DATASYNC has to be
		 * set as well, to make fdatasync write the inode out.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, 1);
	return err;

out:
	ubifs_release_budget(c, &req);
	return err;
}
1252
1253int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
1254{
1255 int err;
1256 struct inode *inode = dentry->d_inode;
1257 struct ubifs_info *c = inode->i_sb->s_fs_info;
1258
1259 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1260 inode->i_ino, inode->i_mode, attr->ia_valid);
1261 err = inode_change_ok(inode, attr);
1262 if (err)
1263 return err;
1264
1265 err = dbg_check_synced_i_size(inode);
1266 if (err)
1267 return err;
1268
1269 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1270
1271 err = do_truncation(c, inode, attr);
1272 else
1273 err = do_setattr(c, inode, attr);
1274
1275 return err;
1276}
1277
/*
 * ubifs_invalidatepage - VFS '->invalidatepage()' for UBIFS.
 *
 * Releases the budget of a dirty page being invalidated and clears its
 * private state. Partial invalidation leaves the page dirty and budgeted.
 */
static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset)
		/* Only part of the page is invalidated - it stays dirty */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}
1297
1298static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
1299{
1300 struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);
1301
1302 nd_set_link(nd, ui->data);
1303 return NULL;
1304}
1305
/*
 * ubifs_fsync - VFS '->fsync()' for UBIFS.
 *
 * Writes the inode (unless this is a pure data-sync and the inode has no
 * data-relevant dirt), then flushes any write-buffers still holding nodes of
 * this inode. Returns zero on success and a negative error code on failure.
 */
int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	/*
	 * VFS has already synchronized dirty pages for this inode. Synchronize
	 * the inode itself unless this is a 'fdatasync()' call and the inode
	 * carries no data-sync-relevant changes.
	 */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, 1);
		if (err)
			return err;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer.
	 * Push them out.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
	if (err)
		return err;

	return 0;
}
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344static inline int mctime_update_needed(const struct inode *inode,
1345 const struct timespec *now)
1346{
1347 if (!timespec_equal(&inode->i_mtime, now) ||
1348 !timespec_equal(&inode->i_ctime, now))
1349 return 1;
1350 return 0;
1351}
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
/**
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * Updates mtime and ctime of @inode when they differ from the current time,
 * budgeting for the inode change first. Returns zero on success and a
 * negative error code on failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* If the inode was already dirty, the budget is a duplicate */
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}
1387
/*
 * ubifs_aio_write - VFS '->aio_write()' for UBIFS.
 *
 * Updates [mc]time (with budgeting) before delegating to the generic write
 * path, then flushes write-buffers when the file requires synchronous I/O.
 */
static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	int err;
	ssize_t ret;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	err = update_mctime(c, inode);
	if (err)
		return err;

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (ret < 0)
		return ret;

	/* Synchronous writes must also push the write-buffers */
	if (ret > 0 && (IS_SYNC(inode) || iocb->ki_filp->f_flags & O_SYNC)) {
		err = ubifs_sync_wbufs_by_inode(c, inode);
		if (err)
			return err;
	}

	return ret;
}
1412
static int ubifs_set_page_dirty(struct page *page)
{
	int rc;

	rc = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen: in UBIFS pages are made dirty only via the budgeted
	 * write paths, so the page must not have been dirty already.
	 */
	ubifs_assert(rc == 0);
	return rc;
}
1425
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a dirty page without budgeting for it -
	 * should not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	/* Reaching here indicates a bug - trip the assertion */
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}
1440
1441
1442
1443
1444
/*
 * ubifs_vm_page_mkwrite - mmapped page is about to become writable.
 *
 * UBIFS must budget before a page may be dirtied; this hook takes the
 * budget (and the [mc]time-update budget if needed) before the page fault
 * completes. Returns zero on success and %VM_FAULT_SIGBUS on failure.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));

	if (unlikely(c->ro_media))
		return VM_FAULT_SIGBUS;

	/*
	 * We have not locked @page yet, so we may budget for changing it now.
	 * We cannot do this after locking the page, because budgeting may
	 * cause write-back, which would deadlock on the page lock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so
	 * we assume it is not and budget for a new page. We could look at the
	 * @PG_private flag and figure this out, but we may race with write-
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother, because over-
	 * budgeting is a small price to pay anyway - the surplus is returned
	 * once the page is locked and its state is known.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change the inode time stamps, which requires
		 * extra budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn("out of space for mmapped file "
				   "(inode number %lu)", inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* The page was truncated while we were budgeting */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		/* Already dirty and budgeted - return the duplicate budget */
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			/* Page exists on the media - over-budgeted */
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	unlock_page(page);
	return 0;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}
1536
/* VM operations for mmapped UBIFS files */
static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};
1541
1542static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1543{
1544 int err;
1545
1546
1547 err = generic_file_mmap(file, vma);
1548 if (err)
1549 return err;
1550 vma->vm_ops = &ubifs_file_vm_ops;
1551 return 0;
1552}
1553
/* Address-space operations for regular UBIFS files */
const struct address_space_operations ubifs_file_address_operations = {
	.readpage = ubifs_readpage,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
	.releasepage = ubifs_releasepage,
};
1563
/* Inode operations for regular UBIFS files */
const struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.setxattr = ubifs_setxattr,
	.getxattr = ubifs_getxattr,
	.listxattr = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
#endif
};
1574
/* Inode operations for UBIFS symbolic links */
const struct inode_operations ubifs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = ubifs_follow_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
};
1581
/* File operations for regular UBIFS files */
const struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = ubifs_aio_write,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};
1597