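/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of two parts - the log and bud LEBs. The log holds
 * commit-start and reference nodes, and the reference nodes point to bud
 * LEBs, where the actual journal data (inode, directory entry, data and
 * truncation nodes) is written. Each journal head has its own write-buffer
 * and its own set of buds. The helpers in this file reserve journal space,
 * write nodes to a journal head, and then update the TNC (the in-memory
 * index) so that the new nodes can be looked up; the on-flash index itself
 * is only updated at commit time.
 */
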
#include "ubifs.h"
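
/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode node to zero out
 */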
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}
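
/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 *                         entry node.
 * @dent: the directory entry node to zero out
 */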
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
}
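
/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 *                         node.
 * @trun: the truncation node to zero out
 */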
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}
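
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. If there is not
 * enough free space in the current bud, a new bud is picked (running garbage
 * collection if necessary) and attached to the journal head. On success the
 * write-buffer of the journal head is locked and must be released with
 * 'release_head()'. Returns zero in case of success and a negative error
 * code in case of failure.
 */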
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * The write-buffer was not seek'ed, or there is not enough space -
	 * look for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run the garbage collector to make some.
	 * But the write-buffer mutex has to be unlocked because GC also
	 * takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may have
		 * allocated a new bud for this journal head because we
		 * dropped @wbuf->io_mutex, so try once again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process
		 * is trying to write to the same journal head at the same
		 * time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new
	 * bud to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum), and recovery would see corruption in the
	 * next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(c, err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * -EAGAIN only means "commit and retry", so if returning the
		 * LEB failed as well, report that more severe error instead.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
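
/**
 * write_node - write a node to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @node: node to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 *
 * This function writes a node to the reserved space of journal head @jhead.
 * The node is prepared (common header initialized, CRC calculated) before
 * it is written. Returns zero in case of success and a negative error code
 * in case of failure.
 */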
static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
		      int *lnum, int *offs)
{
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(c, jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;

	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);
	ubifs_prepare_node(c, node, len, 0);

	return ubifs_wbuf_write_nolock(wbuf, node, len);
}
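
/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function is the same as 'write_node()' but it does not assume the
 * buffer is a single node, so it does not prepare it. The caller is expected
 * to have prepared the (possibly grouped) nodes already.
 */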
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(c, jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}
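
/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes a space reservation in journal head @jhead. It takes
 * the commit lock (read mode) and locks the journal head, and the caller has
 * to release them with 'finish_reservation()' and 'release_head()'. If there
 * is not enough space, it triggers a commit and retries. Returns zero in
 * case of success and a negative error code in case of failure.
 */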
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* c->commit_sem will get released via finish_reservation() */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so make the error -EAGAIN so that the below
		 * will commit and re-try.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * Otherwise the budgeting is incorrect. We always have to be
		 * able to write to the media, because all operations are
		 * budgeted. Deletions are not budgeted, though, but we
		 * reserve an extra LEB for them.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are budgeting problems, print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
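
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * 'make_reservation()'. It has to be called after each 'make_reservation()'.
 */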
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
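
/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes a journal space reservation. It must be called
 * after 'make_reservation()'.
 */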
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
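
/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */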
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	return 0;
}
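
/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */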
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is
	 * not needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
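
/**
 * mark_inode_clean - mark a UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper clears the @ui->dirty flag and releases the corresponding
 * budget. Note, VFS may still treat the inode as dirty and try to write it
 * back, but 'ubifs_write_inode()' would then just not write anything.
 */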
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}
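
/**
 * set_dent_cookie - set a directory entry cookie.
 * @c: UBIFS file-system description object
 * @dent: directory entry node
 *
 * If the double hash feature is enabled, a random 32-bit cookie is assigned
 * to the entry; it is used to disambiguate entries whose names hash to the
 * same key.
 */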
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
	if (c->double_hash)
		dent->cookie = prandom_u32();
	else
		dent->cookie = 0;
}
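
/**
 * ubifs_jnl_update - update an inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The host inode @dir is written last, which matters for extended
 * attributes: it guarantees that if the host inode is synchronized and its
 * write-buffer flushed, the extended attribute inode is flushed too.
 * Similarly, if @dir is synchronized, its directory entry corresponding to
 * @nm gets synchronized as well.
 *
 * If the inode @inode or the parent directory @dir are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks @dir and @inode as clean and returns zero on success.
 * In case of failure, a negative error code is returned.
 */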
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted
	 * anyway. And if the inode is being deleted, no need to synchronize
	 * the write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	len += host_ui->data_len;

	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		if (nm->hash)
			dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
		else
			dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		if (nm->hash)
			err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
		else
			err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last
	 * reference to it has just been deleted, because the inode may still
	 * be opened. Instead, the inode has been added to orphan lists and
	 * the orphan subsystem will take further care of it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	if (xent) {
		spin_lock(&host_ui->ui_lock);
		host_ui->synced_i_size = host_ui->ui_size;
		spin_unlock(&host_ui->ui_lock);
	}
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
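
/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal. Returns %0 if the data
 * node was successfully written, and a negative error code in case of
 * failure.
 */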
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = ubifs_crypt_is_encrypted(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);

	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall back to the write reserve buffer. Note, we might be
		 * currently on the memory reclaim path, when the kernel is
		 * trying to free some memory by writing out dirty pages. The
		 * write reserve buffer helps us to guarantee that we are
		 * always able to write the data.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;
	} else {
		data->compr_size = 0;
		out_len = compr_len;
	}

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}
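
/**
 * ubifs_jnl_write_inode - flush an inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case
 * of success and a negative error code in case of failure.
 */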
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data. No
	 * need to synchronize the write-buffer either.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
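
/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode, which includes removing it from the
 * orphan list, deleting it from TNC and, in some cases, writing a deletion
 * inode to the journal.
 *
 * When an inode is unlinked, 'ubifs_jnl_update()' writes a deletion inode
 * and directory entry and adds the inode to the orphan list. Later, when the
 * last reference to the inode is dropped, this function is called. If a
 * commit happened in between, the earlier deletion inode may no longer be in
 * the journal (it may even have been garbage-collected), so another deletion
 * inode has to be written via 'ubifs_jnl_write_inode()'. If no commit
 * happened (the common case, detected by comparing @ui->del_cmtno with
 * @c->cmt_no under @c->commit_sem), it is enough to remove the inode from
 * TNC and from the orphan list.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */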
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(c, inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check the commit number again, because the first check was done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
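
/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation, which involves
 * writing 2 directory entries and 1 or 2 parent inodes. It marks the written
 * inodes as clean and returns zero on success. In case of failure, a
 * negative error code is returned.
 */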
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	int twoparents = (fst_dir != snd_dir);
	void *p;

	ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
	ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);

	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
	if (twoparents)
		len += plen;

	dent1 = kzalloc(len, GFP_NOFS);
	if (!dent1)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent for 1st entry */
	dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	set_dent_cookie(c, dent1);
	zero_dent_node_unused(dent1);
	ubifs_prep_grp_node(c, dent1, dlen1, 0);

	/* Make new dent for 2nd entry */
	dent2 = (void *)dent1 + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (!twoparents)
		pack_inode(c, p, fst_dir, 1);
	else {
		pack_inode(c, p, fst_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, snd_dir, 1);
	}

	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen2;

	ino_key_init(c, &key, fst_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (twoparents) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, snd_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);

	mark_inode_clean(c, ubifs_inode(fst_dir));
	if (twoparents)
		mark_inode_clean(c, ubifs_inode(snd_dir));
	kfree(dent1);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(dent1);
	return err;
}
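
/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_inode: directory entry's inode to rename
 * @old_nm: name of the old directory entry to rename
 * @new_dir: parent inode of the new directory entry
 * @new_inode: new directory entry's inode (or directory entry's inode to
 *             replace)
 * @new_nm: new name of the new directory entry
 * @whiteout: whiteout inode
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the rename operation, which may involve writing
 * up to 4 inodes and 2 directory entries. It marks the written inodes as
 * clean and returns zero on success. In case of failure, a negative error
 * code is returned.
 */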
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct inode *old_inode,
		     const struct fscrypt_name *old_nm,
		     const struct inode *new_dir,
		     const struct inode *new_inode,
		     const struct fscrypt_name *new_nm,
		     const struct inode *whiteout, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (move)
		len += plen;
	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(new_nm));
	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
	dent->name[fname_len(new_nm)] = '\0';
	set_dent_cookie(c, dent);
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

	if (whiteout) {
		dent2->inum = cpu_to_le64(whiteout->i_ino);
		dent2->type = get_dent_type(whiteout->i_mode);
	} else {
		/* Make deletion dent */
		dent2->inum = 0;
		dent2->type = DT_UNKNOWN;
	}
	dent2->nlen = cpu_to_le16(fname_len(old_nm));
	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
	dent2->name[fname_len(old_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, new_dir->i_ino, new_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	if (whiteout) {
		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
		if (err)
			goto out_ro;

		ubifs_delete_orphan(c, whiteout->i_ino);
	} else {
		err = ubifs_add_dirt(c, lnum, dlen2);
		if (err)
			goto out_ro;

		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_remove_nm(c, &key, old_nm);
		if (err)
			goto out_ro;
	}

	offs += aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (move) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}
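
/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @block: data block number
 * @dn: data node to truncate
 * @new_len: new data length on input, resulting data node length on output
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed/encrypted and re-written.
 */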
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
			      unsigned int block, struct ubifs_data_node *dn,
			      int *new_len)
{
	void *buf;
	int err, dlen, compr_type, out_len, old_dlen;

	out_len = le32_to_cpu(dn->size);
	buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto out;
	}

	if (compr_type == UBIFS_COMPR_NONE) {
		out_len = *new_len;
	} else {
		err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
		if (err)
			goto out;

		ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	}

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
		if (err)
			goto out;

		out_len = old_dlen;
	} else {
		dn->compr_size = 0;
	}

	ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
	err = 0;
out:
	kfree(buf);
	return err;
}
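
/**
 * ubifs_jnl_truncate - update the journal because of a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * When the size of a file decreases due to truncation, a truncation node is
 * written, the index entries for data beyond the new size are removed from
 * TNC, and the last data block is re-written if it has been affected. The
 * inode is also written in order to synchronize the new inode size.
 *
 * This function marks the inode as clean and returns zero on success. In
 * case of failure, a negative error code is returned.
 */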
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(c, !ui->data_len);
	ubifs_assert(c, S_ISREG(inode->i_mode));
	ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));

	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* Get the last data block so it can be truncated */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			dlen = 0; /* Not found, so there is nothing to do */
		else if (err)
			goto out_free;
		else {
			int dn_len = le32_to_cpu(dn->size);

			if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
				ubifs_err(c, "bad data node (block %u, inode %lu)",
					  blk, inode->i_ino);
				ubifs_dump_node(c, dn);
				goto out_free;
			}

			if (dn_len <= dlen)
				dlen = 0; /* Nothing to do */
			else {
				err = truncate_data_node(c, inode, blk, dn, &dlen);
				if (err)
					goto out_free;
			}
		}
	}

	/* Must make reservation before allocating sequence numbers */
	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
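
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute entry name
 *
 * This function deletes an extended attribute, which is very similar to
 * un-linking a regular file - it writes a deletion xentry and a deletion
 * inode, and updates the path to the host inode. Returns zero in case of
 * success and a negative error code in case of failure.
 */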
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode,
			   const struct fscrypt_name *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	ubifs_assert(c, inode->i_nlink == 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any
	 * data to it and assume its length is %UBIFS_INO_NODE_SZ.
	 */
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kzalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from
	 * TNC. Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
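
/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode
 * and the host inode to the journal (to the base head). The host inode is
 * written after the extended attribute inode in order to guarantee that the
 * extended attribute will be flushed when the inode is synchronized by
 * 'fsync()' and consequently, the write-buffer is synchronized. This
 * function returns zero in case of success and a negative error code in
 * case of failure.
 */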
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(c, host->i_nlink > 0);
	ubifs_assert(c, inode->i_nlink > 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	ino = kzalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}