1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61#include "ubifs.h"
62
63
64
65
66
67static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
68{
69 memset(ino->padding1, 0, 4);
70 memset(ino->padding2, 0, 26);
71}
72
73
74
75
76
77
78static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
79{
80 dent->padding1 = 0;
81 memset(dent->padding2, 0, 4);
82}
83
84
85
86
87
88static inline void zero_data_node_unused(struct ubifs_data_node *data)
89{
90 memset(data->padding, 0, 2);
91}
92
93
94
95
96
97
98static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
99{
100 memset(trun->padding, 0, 12);
101}
102
103
104
105
106
107
108
109
110
111
112
113
114
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. On success it returns
 * with the head's write-buffer @io_mutex locked — the caller releases it
 * later via 'release_head()'. Returns zero in case of success, %-EROFS if the
 * file-system is in read-only error state, %-ENOSPC if no space could be
 * found even after garbage collection, or another negative error code in case
 * of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * 'squeeze' is passed to 'ubifs_find_free_space()' only for the base
	 * head — presumably because base-head nodes are small and fit well
	 * into tail space of eraseblocks (NOTE(review): rationale inferred,
	 * confirm against 'ubifs_find_free_space()').
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		/* Current bud has enough space — keep @io_mutex locked */
		return 0;

	/*
	 * The write-buffer has not been seeked, or there is not enough space
	 * left in the current bud — look for an LEB with free space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space was found; run the garbage collector to make some.
	 * The write-buffer mutex must be dropped first, because GC also
	 * takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may have
		 * allocated a new bud for this journal head while @io_mutex
		 * was dropped, so try again a limited number of times.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * While @io_mutex was dropped, somebody else switched this
		 * journal head and there is enough space now — return the
		 * LEB we just obtained.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Synchronize the write-buffer before adding the new bud to the log:
	 * the previous bud's buffered data must reach the media before a log
	 * reference to the new bud exists, otherwise a power cut in between
	 * could leave the log pointing past unwritten data.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * %-EAGAIN is not really an error, so prefer reporting the
		 * 'ubifs_return_leb()' failure instead.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
256 int *lnum, int *offs)
257{
258 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
259
260 ubifs_assert(jhead != GCHD);
261
262 *lnum = c->jheads[jhead].wbuf.lnum;
263 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
264
265 dbg_jnl("jhead %s, LEB %d:%d, len %d",
266 dbg_jhead(jhead), *lnum, *offs, len);
267 ubifs_prepare_node(c, node, len, 0);
268
269 return ubifs_wbuf_write_nolock(wbuf, node, len);
270}
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
287 int *lnum, int *offs, int sync)
288{
289 int err;
290 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
291
292 ubifs_assert(jhead != GCHD);
293
294 *lnum = c->jheads[jhead].wbuf.lnum;
295 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
296 dbg_jnl("jhead %s, LEB %d:%d, len %d",
297 dbg_jhead(jhead), *lnum, *offs, len);
298
299 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
300 if (err)
301 return err;
302 if (sync)
303 err = ubifs_wbuf_sync_nolock(wbuf);
304 return err;
305}
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes a space reservation in journal head @jhead. On success
 * it returns with @c->commit_sem taken for reading and the journal head
 * locked; the caller releases them via 'finish_reservation()' and
 * 'release_head()'. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* @c->commit_sem stays held for the caller */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. Try committing once — the
		 * commit may free dirty space so that GC can progress — by
		 * turning the error into %-EAGAIN, which the code below
		 * handles by committing and retrying.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * After two fruitless retries the %-ENOSPC is left as-is and
		 * the debugging dump below is printed, because running out of
		 * space here suggests a budgeting inconsistency.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * %-EAGAIN means a commit is needed (either requested above or
	 * reported by 'reserve_space()'). Commit and retry, but give up if
	 * this loops for too long.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless journal size limitations are
		 * too tight.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* Looks like a budgeting problem — dump useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
396
397
398
399
400
401
402
403
404
405
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each
 * successful 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
410
411
412
413
414
415
416
417
/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation by dropping the
 * @c->commit_sem read lock taken by 'make_reservation()'. It must be called
 * after every successful 'make_reservation()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
422
423
424
425
426
427static int get_dent_type(int mode)
428{
429 switch (mode & S_IFMT) {
430 case S_IFREG:
431 return UBIFS_ITYPE_REG;
432 case S_IFDIR:
433 return UBIFS_ITYPE_DIR;
434 case S_IFLNK:
435 return UBIFS_ITYPE_LNK;
436 case S_IFBLK:
437 return UBIFS_ITYPE_BLK;
438 case S_IFCHR:
439 return UBIFS_ITYPE_CHR;
440 case S_IFIFO:
441 return UBIFS_ITYPE_FIFO;
442 case S_IFSOCK:
443 return UBIFS_ITYPE_SOCK;
444 default:
445 BUG();
446 }
447 return 0;
448}
449
450
451
452
453
454
455
456
/**
 * pack_inode - pack an inode node into an on-flash buffer.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates whether this is the last node of the group
 *
 * Fills every on-flash field of @ino from the VFS inode and the UBIFS inode
 * private data, zeroes the padding, and finishes the node via
 * 'ubifs_prep_grp_node()'.
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode (nlink == 0) —
	 * the data is not needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
496
497
498
499
500
501
502
503
504
505
506
/**
 * mark_inode_clean - mark a UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper clears @ui->dirty, releasing the dirty-inode budget first if
 * the inode was dirty.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
/**
 * ubifs_jnl_update - update an inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion (i.e. unlink or rmdir)
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function writes a directory entry node (or extended attribute entry
 * node), the inode node and the parent (host) inode node to the base journal
 * head as one group, then updates the TNC accordingly. The host inode is
 * packed last in the group. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct qstr *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, there is no
	 * need to attach the inode data, and no need to synchronize even if
	 * the inode is synchronous — it is going away anyway.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Account for the host inode's attached data as well */
	len += host_ui->data_len;

	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	/* A zero inum marks the entry as deleted */
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(nm->len);
	memcpy(dent->name, nm->name, nm->len);
	dent->name[nm->len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	/* The inode node follows the dent node, the host inode is last */
	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		/* The inode may still be open — track it as an orphan */
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		/* Record the inums so an fsync of either syncs this wbuf */
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, the inode is not removed from the TNC even if its last
	 * reference was just deleted — it may still be open. It was added to
	 * the orphan list above, which takes further care of it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	/* A TNC/log failure after the journal write — go read-only */
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
684
685
686
687
688
689
690
691
692
693
694
695
/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function compresses @buf (unless the inode has compression disabled),
 * writes the resulting data node to the data journal head, and adds it to the
 * TNC. Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		 (unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Allocation failed — fall back to the pre-allocated
		 * write-reserve buffer, serialized by
		 * @c->write_reserve_mutex. @allocated tracks which buffer is
		 * in use so the cleanup paths below release the right one.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);
	zero_data_node_unused(data);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	/* Record the inum so a later fsync of the inode syncs this wbuf */
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}
774
775
776
777
778
779
780
781
782
783
/**
 * ubifs_jnl_write_inode - flush an inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the base journal head and updates the
 * TNC. If this was the last reference (nlink == 0), the inode is instead
 * removed from the TNC and its orphan entry is deleted. Returns zero in case
 * of success and a negative error code in case of failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data and
	 * do not synchronize the write-buffer even for synchronous inodes.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		/* The deletion inode is now on flash — drop the TNC entries
		 * and the orphan, and account the node's space as dirty */
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode, which must have no more links
 * (nlink == 0). If a commit has happened since the deletion inode was
 * journalled (@ui->del_cmtno differs from @c->cmt_no), a new deletion inode
 * must be written via 'ubifs_jnl_write_inode()'; otherwise it is enough to
 * remove the inode from the TNC and drop its orphan entry. Returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure — write a fresh deletion inode */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Re-check under @commit_sem — a commit may have started between the
	 * unlocked check above and taking the semaphore.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_dentry: directory entry to rename
 * @new_dir: parent inode of the new directory entry
 * @new_dentry: new directory entry (or the directory entry to replace)
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation which may involve writing up
 * to 4 inodes (the two directory inodes, the renamed inode, and the inode
 * whose directory entry is being overwritten, if any) and 2 directory entry
 * nodes to the journal as one group. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct dentry *old_dentry,
		     const struct inode *new_dir,
		     const struct dentry *new_dentry, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	const struct inode *old_inode = d_inode(old_dentry);
	const struct inode *new_inode = d_inode(new_dentry);
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
		old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	/* When moving across directories, the second dir inode is written too */
	if (old_dir != new_dir)
		len += plen;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(new_dentry->d_name.len);
	memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
	dent->name[new_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	/* Make deletion dent (inum == 0 marks it deleted) */
	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
			    &old_dentry->d_name);
	dent2->inum = 0;
	dent2->type = DT_UNKNOWN;
	dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
	memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
	dent2->name[old_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		/* The overwritten inode may still be open — make it an orphan */
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
	if (err)
		goto out_ro;

	/* The deletion dent is never referenced from TNC — account it dirty */
	err = ubifs_add_dirt(c, lnum, dlen2);
	if (err)
		goto out_ro;

	dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
	err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
	if (err)
		goto out_ro;

	/* Register the packed inode nodes in the TNC, in packing order */
	offs += aligned_dlen1 + aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (old_dir != new_dir) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
/**
 * recomp_data_node - re-compress a truncated data node.
 * @c: UBIFS file-system description object
 * @dn: data node to re-compress (modified in place)
 * @new_len: in: new (truncated) uncompressed data length;
 *           out: total resulting node length (header plus compressed data)
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed: the node payload is decompressed into a
 * temporary buffer, then the first @*new_len bytes are compressed back into
 * @dn. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int recomp_data_node(const struct ubifs_info *c,
			    struct ubifs_data_node *dn, int *new_len)
{
	void *buf;
	int err, len, compr_type, out_len;

	/* Worst-case decompression may expand the data — size accordingly */
	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);
	err = ubifs_decompress(c, &dn->data, len, buf, &out_len, compr_type);
	if (err)
		goto out;

	/* Compress only the truncated prefix back into the node */
	ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
out:
	kfree(buf);
	return err;
}
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old inode size
 * @new_size: new inode size
 *
 * This function writes the inode node, a truncation node, and — if @new_size
 * is not block-aligned — the shortened last data block to the journal as one
 * group, then updates the TNC and removes the now out-of-range data nodes.
 * Only regular files without attached inode data are supported. Returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
	ubifs_assert(S_ISREG(inode->i_mode));
	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

	/*
	 * One buffer holds the whole group: inode node, truncation node, and
	 * worst-case room for the re-written last data node.
	 */
	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* The new size is not block-aligned — get the last data node */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			/* A hole — nothing to re-write */
			dlen = 0;
		else if (err)
			goto out_free;
		else {
			if (le32_to_cpu(dn->size) <= dlen)
				/* Node already short enough — keep it as-is */
				dlen = 0;
			else {
				int compr_type = le16_to_cpu(dn->compr_type);

				if (compr_type != UBIFS_COMPR_NONE) {
					/* dlen becomes the full node length */
					err = recomp_data_node(c, dn, &dlen);
					if (err)
						goto out_free;
				} else {
					dn->size = cpu_to_le32(dlen);
					dlen += UBIFS_DATA_NODE_SZ;
				}
				zero_data_node_unused(dn);
			}
		}
	}

	/* Must make reservation before allocating sequence numbers */
	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		/* The data node follows the inode and truncation nodes */
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* The truncation node is not referenced from TNC — account it dirty */
	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	/* Remove data nodes of the [new_size, old_size] block range */
	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode (nlink must be zero)
 * @nm: extended attribute name
 *
 * This function writes the deletion xattr entry node, the xattr deletion
 * inode, and the updated host inode to the base journal head as one group,
 * then removes the xattr entry and the whole xattr inode from the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode, const struct qstr *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
		host->i_ino, inode->i_ino, nm->name,
		ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Group layout: deletion xattr entry, xattr deletion inode (without
	 * data, it is being deleted), then the host inode with its data.
	 */
	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	/* A zero inum marks the entry as deleted */
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(nm->len);
	memcpy(xent->name, nm->name, nm->len);
	xent->name[nm->len] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from
	 * TNC; the whole key range of the inode is covered.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1400 const struct inode *host)
1401{
1402 int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
1403 struct ubifs_inode *host_ui = ubifs_inode(host);
1404 struct ubifs_ino_node *ino;
1405 union ubifs_key key;
1406 int sync = IS_DIRSYNC(host);
1407
1408 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
1409 ubifs_assert(host->i_nlink > 0);
1410 ubifs_assert(inode->i_nlink > 0);
1411 ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
1412
1413 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1414 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1415 aligned_len1 = ALIGN(len1, 8);
1416 aligned_len = aligned_len1 + ALIGN(len2, 8);
1417
1418 ino = kmalloc(aligned_len, GFP_NOFS);
1419 if (!ino)
1420 return -ENOMEM;
1421
1422
1423 err = make_reservation(c, BASEHD, aligned_len);
1424 if (err)
1425 goto out_free;
1426
1427 pack_inode(c, ino, host, 0);
1428 pack_inode(c, (void *)ino + aligned_len1, inode, 1);
1429
1430 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
1431 if (!sync && !err) {
1432 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1433
1434 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
1435 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
1436 }
1437 release_head(c, BASEHD);
1438 if (err)
1439 goto out_ro;
1440
1441 ino_key_init(c, &key, host->i_ino);
1442 err = ubifs_tnc_add(c, &key, lnum, offs, len1);
1443 if (err)
1444 goto out_ro;
1445
1446 ino_key_init(c, &key, inode->i_ino);
1447 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
1448 if (err)
1449 goto out_ro;
1450
1451 finish_reservation(c);
1452 spin_lock(&host_ui->ui_lock);
1453 host_ui->synced_i_size = host_ui->ui_size;
1454 spin_unlock(&host_ui->ui_lock);
1455 mark_inode_clean(c, host_ui);
1456 kfree(ino);
1457 return 0;
1458
1459out_ro:
1460 ubifs_ro_mode(c, err);
1461 finish_reservation(c);
1462out_free:
1463 kfree(ino);
1464 return err;
1465}
1466
1467