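/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of several journal heads, each backed by a write-buffer
 * and a set of LEBs ("buds"). A file-system operation reserves space in a
 * journal head, writes its nodes to the corresponding write-buffer, and then
 * updates the TNC (the in-memory index) to point at the new nodes. The
 * on-flash index is updated only later, when a commit happens, which is what
 * keeps the amount of flash I/O per operation small.
 */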

#include "ubifs.h"
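
/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode node to zero out
 */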
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}
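
/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 *                         entry node.
 * @dent: the directory entry node to zero out
 */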
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
}
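
/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 *                         node.
 * @trun: the truncation node to zero out
 */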
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}
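
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. If there is not enough
 * room in the corresponding write-buffer, it tries to find another LEB (via
 * garbage collection if necessary) and makes it the new bud of this journal
 * head. Returns zero in case of success, in which case the journal head stays
 * locked, and a negative error code in case of failure.
 */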
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * Write-buffer was not seek'ed or there is not enough space - look for
	 * an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run the garbage collector to make some.
	 * But the write-buffer mutex has to be unlocked because GC also takes
	 * it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may have
		 * allocated a new bud for this journal head, because we
		 * dropped @wbuf->io_mutex, so try once again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum), and recovery would see corruption in the
	 * next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * -EAGAIN only means "commit and re-try", it is not a real
		 * error, so if returning the LEB failed as well, report that
		 * error instead.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
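
/**
 * write_node - write a node to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @node: node to write
 * @len: node length
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 *
 * This function prepares the node and writes it to the write-buffer of journal
 * head @jhead. Returns zero in case of success and a negative error code in
 * case of failure.
 */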
static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
		      int *lnum, int *offs)
{
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;

	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);
	ubifs_prepare_node(c, node, len, 0);

	return ubifs_wbuf_write_nolock(wbuf, node, len);
}
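
/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function is similar to 'write_node()' but it does not assume the buffer
 * is a single node, so it does not prepare it (the caller has already prepared
 * the node group). Returns zero in case of success and a negative error code
 * in case of failure.
 */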
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}
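
/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes a space reservation in journal head @jhead. It takes
 * the commit semaphore and locks the journal head; the caller has to release
 * them with 'finish_reservation()' and 'release_head()'. Returns zero in case
 * of success and a negative error code in case of failure.
 */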
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. Running a commit may free
		 * some dirty space, so turn the error into -EAGAIN and re-try
		 * a couple of times before giving up.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are budgeting problems, so print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
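
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * 'make_reservation()'. It has to be called after each successful
 * 'make_reservation()' invocation.
 */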
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
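
/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation. It must be called after
 * 'make_reservation()'.
 */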
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
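
/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */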
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	return 0;
}
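
/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */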
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
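
/**
 * mark_inode_clean - mark a UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper marks UBIFS inode @ui as clean by clearing the @ui->dirty flag
 * and releasing its dirty-inode budget. VFS may still treat the inode as
 * dirty and try to write it back, but 'ubifs_write_inode()' would then find
 * nothing to do.
 */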
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}
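
/**
 * set_dent_cookie - fill a directory entry's cookie.
 * @c: UBIFS file-system description object
 * @dent: directory entry to fill
 *
 * When double hashing is enabled, every directory entry node carries a random
 * 32-bit cookie which is used to disambiguate entries whose names hash to the
 * same key.
 */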
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
	if (c->double_hash)
		dent->cookie = prandom_u32();
	else
		dent->cookie = 0;
}
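
/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal. If @deletion is set and the inode's link count
 * dropped to zero, the inode is also added to the orphan list.
 *
 * Returns zero on success. In case of failure, a negative error code is
 * returned.
 */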
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted anyway.
	 * And if the inode is being deleted, no need to synchronize the
	 * write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	len += host_ui->data_len;

	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		if (nm->hash)
			dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
		else
			dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		if (nm->hash)
			err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
		else
			err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last reference
	 * to it has just been deleted, because the inode may still be opened.
	 * Instead, the inode has been added to orphan lists and the orphan
	 * subsystem will take further care about it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
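
/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal, compressing (and, for
 * encrypted inodes, encrypting) the data first. Returns zero in case of
 * success and a negative error code in case of failure.
 */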
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = ubifs_crypt_is_encrypted(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall-back to the write reserve buffer. Note, we might be
		 * currently on the memory reclaim path, when the kernel is
		 * trying to free some memory by writing out dirty pages. The
		 * write reserve buffer helps us to guarantee that we are
		 * always able to write the data.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;

	} else {
		data->compr_size = 0;
		out_len = compr_len;
	}

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}
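
/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode has no more
 * links (it is being deleted), a deletion inode node is written, the inode is
 * removed from the TNC, and its orphan entry is deleted. Returns zero in case
 * of success and a negative error code in case of failure.
 */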
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data. No
	 * need to synchronize the write-buffer either.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
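
/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode whose link count has dropped to zero.
 * If no commit has happened since the last reference was removed, the
 * deletion inode written by 'ubifs_jnl_update()' is still in the journal, so
 * it is enough to remove the inode from the TNC and delete its orphan entry.
 * Otherwise, 'ubifs_jnl_write_inode()' is called to write a fresh deletion
 * inode. Returns zero in case of success and a negative error code in case of
 * failure.
 */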
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened since the inode was deleted */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check the commit number again, because the first test was done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
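
/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation which may be used by
 * the rename system call with the %RENAME_EXCHANGE flag. Returns zero in case
 * of success and a negative error code in case of failure.
 */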
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	int twoparents = (fst_dir != snd_dir);
	void *p;

	ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);

	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
	if (twoparents)
		len += plen;

	dent1 = kzalloc(len, GFP_NOFS);
	if (!dent1)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent for 1st entry */
	dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	set_dent_cookie(c, dent1);
	zero_dent_node_unused(dent1);
	ubifs_prep_grp_node(c, dent1, dlen1, 0);

	/* Make new dent for 2nd entry */
	dent2 = (void *)dent1 + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (!twoparents)
		pack_inode(c, p, fst_dir, 1);
	else {
		pack_inode(c, p, fst_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, snd_dir, 1);
	}

	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen2;

	ino_key_init(c, &key, fst_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (twoparents) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, snd_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);

	mark_inode_clean(c, ubifs_inode(fst_dir));
	if (twoparents)
		mark_inode_clean(c, ubifs_inode(snd_dir));
	kfree(dent1);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(dent1);
	return err;
}
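
/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_inode: directory entry's inode to rename
 * @old_nm: name of the old directory entry
 * @new_dir: parent inode of the new directory entry
 * @new_inode: new directory entry's inode (or the inode being replaced)
 * @new_nm: new name of the directory entry
 * @whiteout: whiteout inode, or %NULL if no whiteout is requested
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation which may involve writing up
 * to 3 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */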
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct inode *old_inode,
		     const struct fscrypt_name *old_nm,
		     const struct inode *new_dir,
		     const struct inode *new_inode,
		     const struct fscrypt_name *new_nm,
		     const struct inode *whiteout, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (move)
		len += plen;
	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(new_nm));
	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
	dent->name[fname_len(new_nm)] = '\0';
	set_dent_cookie(c, dent);
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

	if (whiteout) {
		dent2->inum = cpu_to_le64(whiteout->i_ino);
		dent2->type = get_dent_type(whiteout->i_mode);
	} else {
		/* Make deletion dent */
		dent2->inum = 0;
		dent2->type = DT_UNKNOWN;
	}
	dent2->nlen = cpu_to_le16(fname_len(old_nm));
	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
	dent2->name[fname_len(old_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, new_dir->i_ino, new_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	if (whiteout) {
		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
		if (err)
			goto out_ro;

		ubifs_delete_orphan(c, whiteout->i_ino);
	} else {
		err = ubifs_add_dirt(c, lnum, dlen2);
		if (err)
			goto out_ro;

		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_remove_nm(c, &key, old_nm);
		if (err)
			goto out_ro;
	}

	offs += aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (move) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}
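
/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode which refers to the data node
 * @block: data block number
 * @dn: data node to re-compress
 * @new_len: new length is returned here
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed (and, for encrypted inodes, re-encrypted)
 * and re-written.
 */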
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
			      unsigned int block, struct ubifs_data_node *dn,
			      int *new_len)
{
	void *buf;
	int err, dlen, compr_type, out_len, old_dlen;

	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto out;
	}

	if (compr_type == UBIFS_COMPR_NONE) {
		out_len = *new_len;
	} else {
		err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
		if (err)
			goto out;

		ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	}

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
		if (err)
			goto out;

		out_len = old_dlen;
	} else {
		dn->compr_size = 0;
	}

	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
	err = 0;
out:
	kfree(buf);
	return err;
}
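
/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * When the size of a file decreases due to truncation, a truncation node is
 * written, the journal tree is updated, and the last data block is re-written
 * if it has been affected. The inode is also updated in order to synchronize
 * the new inode size.
 *
 * This function marks the inode as clean and returns zero on success. In case
 * of failure, a negative error code is returned.
 */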
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
	ubifs_assert(S_ISREG(inode->i_mode));
	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* Get last data block so it can be truncated */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			dlen = 0; /* Not found (so it is a hole) */
		else if (err)
			goto out_free;
		else {
			if (le32_to_cpu(dn->size) <= dlen)
				dlen = 0; /* Nothing to do */
			else {
				err = truncate_data_node(c, inode, blk, dn, &dlen);
				if (err)
					goto out_free;
			}
		}
	}

	/* Must make reservation before allocating sequence numbers */
	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
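
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute entry name
 *
 * This function deletes an extended attribute. The operation is similar to
 * un-linking a regular file: it writes a deletion extended attribute entry, a
 * deletion inode, and the updated host inode. Returns zero in case of success
 * and a negative error code in case of failure.
 */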
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode,
			   const struct fscrypt_name *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any data
	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	 */
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kzalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
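
/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated versions of extended attribute inode
 * @inode and its host inode @host to the journal, and updates the TNC to
 * point at the new positions. Returns zero in case of success and a negative
 * error code in case of failure.
 */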
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(host->i_nlink > 0);
	ubifs_assert(inode->i_nlink > 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	ino = kzalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}