1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61#include "ubifs.h"
62
63
64
65
66
67static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
68{
69 memset(ino->padding1, 0, 4);
70 memset(ino->padding2, 0, 26);
71}
72
73
74
75
76
77
78static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
79{
80 dent->padding1 = 0;
81 memset(dent->padding2, 0, 4);
82}
83
84
85
86
87
88static inline void zero_data_node_unused(struct ubifs_data_node *data)
89{
90 memset(data->padding, 0, 2);
91}
92
93
94
95
96
97
98static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
99{
100 memset(trun->padding, 0, 12);
101}
102
103
104
105
106
107
108
109
110
111
112
113
114
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead.  On success the
 * journal head's write-buffer mutex is left locked - the caller writes the
 * node(s) and then releases the head.  On failure the mutex is unlocked.
 * Returns zero in case of success, %-EAGAIN if commit has to be done, and
 * other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, smaller nodes are written to the base head, so it is
	 * better to try to allocate space at the ends of eraseblocks - this
	 * is what the @squeeze parameter of 'ubifs_find_free_space()' is for.
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		/* Enough room in the current bud - keep the head locked */
		return 0;

	/*
	 * The write-buffer was not seek'ed, or there is not enough space in
	 * the current bud - look for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space; we have to run the garbage collector to make some.
	 * But the write-buffer mutex has to be unlocked first, because GC
	 * takes it too.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB.  But someone else may have
		 * allocated a new bud for this journal head while we had
		 * @wbuf->io_mutex dropped, so try once again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now.  This happens when more than one process
		 * is trying to write to the same journal head at the same
		 * time.  Return the LEB we just obtained back to lprops.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Synchronize the write-buffer before adding the new bud to the log,
	 * so that after a power cut the log never references a bud whose
	 * preceding (next-to-last) bud still had unwritten buffered data.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Prefer the 'ubifs_return_leb()' error over -EAGAIN,
		 * because it is "more important".
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
256 int *lnum, int *offs)
257{
258 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
259
260 ubifs_assert(jhead != GCHD);
261
262 *lnum = c->jheads[jhead].wbuf.lnum;
263 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
264
265 dbg_jnl("jhead %s, LEB %d:%d, len %d",
266 dbg_jhead(jhead), *lnum, *offs, len);
267 ubifs_prepare_node(c, node, len, 0);
268
269 return ubifs_wbuf_write_nolock(wbuf, node, len);
270}
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
287 int *lnum, int *offs, int sync)
288{
289 int err;
290 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
291
292 ubifs_assert(jhead != GCHD);
293
294 *lnum = c->jheads[jhead].wbuf.lnum;
295 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
296 dbg_jnl("jhead %s, LEB %d:%d, len %d",
297 dbg_jhead(jhead), *lnum, *offs, len);
298
299 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
300 if (err)
301 return err;
302 if (sync)
303 err = ubifs_wbuf_sync_nolock(wbuf);
304 return err;
305}
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes space reservation in journal head @jhead.  The
 * function takes the commit semaphore in read mode and calls
 * 'reserve_space()'; if space cannot be reserved because a commit is
 * required (-EAGAIN) or space ran out (-ENOSPC, re-tried a couple of times),
 * it drops the semaphore, runs commit and tries again.  On success,
 * @c->commit_sem is held and the journal head stays locked - the caller has
 * to release them with 'finish_reservation()' and 'release_head()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* @c->commit_sem stays held for the caller */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress.  Committing once may free
		 * some dirty space, after which GC can make progress, so
		 * turn the error into -EAGAIN to commit and re-try below.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * Hitting -ENOSPC again after the re-tries means the
		 * budgeting is likely incorrect: all operations are
		 * budgeted, so writes should always be possible.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants one commit to be done.  Do it and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err("stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn("too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err("cannot reserve %d bytes in jhead %d, error %d",
		len, jhead, err);
	if (err == -ENOSPC) {
		/* Dump debugging information to figure out what went wrong */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
396
397
398
399
400
401
402
403
404
405
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * 'make_reservation()'.  It has to be called after each successful
 * 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
410
411
412
413
414
415
416
417
/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function releases @c->commit_sem which was taken by
 * 'make_reservation()' and has to be called when the reservation is
 * finished.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
422
423
424
425
426
427static int get_dent_type(int mode)
428{
429 switch (mode & S_IFMT) {
430 case S_IFREG:
431 return UBIFS_ITYPE_REG;
432 case S_IFDIR:
433 return UBIFS_ITYPE_DIR;
434 case S_IFLNK:
435 return UBIFS_ITYPE_LNK;
436 case S_IFBLK:
437 return UBIFS_ITYPE_BLK;
438 case S_IFCHR:
439 return UBIFS_ITYPE_CHR;
440 case S_IFIFO:
441 return UBIFS_ITYPE_FIFO;
442 case S_IFSOCK:
443 return UBIFS_ITYPE_SOCK;
444 default:
445 BUG();
446 }
447 return 0;
448}
449
450
451
452
453
454
455
456
/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 *
 * Fills @ino with the on-flash (little-endian) representation of @inode and
 * prepares it as a (possibly last) member of a node group.
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(inode->i_uid);
	ino->gid = cpu_to_le32(inode->i_gid);
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is the last reference to the inode
	 * (nlink == 0) - the data is not needed anymore since the inode is
	 * being deleted.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
496
497
498
499
500
501
502
503
504
505
506
/**
 * mark_inode_clean - mark a UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * Marks inode @ui as clean by clearing the @ui->dirty flag and releasing
 * its dirty-inode budget (only if it actually was dirty).
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	/* Cleared unconditionally - a no-op when the inode was already clean */
	ui->dirty = 0;
}
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
/**
 * ubifs_jnl_update - update an inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal as one node group, then updates the TNC
 * accordingly.
 *
 * Returns zero in case of success.  In case of failure, a negative error
 * code is returned.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct qstr *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *dir_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(dir_ui->data_len == 0);
	ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, there is no
	 * need to attach and write the inode data - it is being deleted
	 * anyway.  And there is no need to synchronize the write-buffer even
	 * if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);
	/* Dent + @inode (possibly with data) + parent inode (no data) */
	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(nm->len);
	memcpy(dent->name, nm->name, nm->len);
	dent->name[nm->len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	/* Pack @inode and the parent/host inode after the dent */
	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		/* Add to orphans before the on-flash update becomes visible */
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, the inode is not removed from the TNC even if the last
	 * reference to it has just been deleted, because the inode may still
	 * be opened.  Instead, it has been added to the orphan lists and the
	 * orphan subsystem takes further care of it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, dir_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
680
681
682
683
684
685
686
687
688
689
690
691
/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal (compressing the data
 * according to the inode's compression settings) and updates the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		(unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall back to the pre-allocated write-reserve buffer.  We
		 * may be on the memory reclaim path here, so the allocation
		 * above may fail (hence __GFP_NOWARN); the reserve buffer,
		 * protected by @c->write_reserve_mutex, guarantees progress.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);
	zero_data_node_unused(data);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(buf, len, &data->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}
770
771
772
773
774
775
776
777
778
779
/**
 * ubifs_jnl_write_inode - flush an inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal.  If the inode is being
 * deleted (nlink == 0), its data is dropped, the whole inode subtree is
 * removed from the TNC and the corresponding orphan entry is deleted.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, there is no need to attach and write
	 * inode data, and no need to synchronize the write-buffer even if the
	 * inode is synchronous.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode (which must have nlink == 0).  If a
 * commit has happened since the inode was orphaned (@ui->del_cmtno differs
 * from @c->cmt_no), a deletion inode has to be written to the journal via
 * 'ubifs_jnl_write_inode()'; otherwise it is enough to remove the inode from
 * the TNC and drop the orphan entry under @c->commit_sem.  Returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check the commit number again, because the first check was done
	 * without @c->commit_sem held, so a commit might have happened in
	 * between.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
921 const struct dentry *old_dentry,
922 const struct inode *new_dir,
923 const struct dentry *new_dentry, int sync)
924{
925 void *p;
926 union ubifs_key key;
927 struct ubifs_dent_node *dent, *dent2;
928 int err, dlen1, dlen2, ilen, lnum, offs, len;
929 const struct inode *old_inode = old_dentry->d_inode;
930 const struct inode *new_inode = new_dentry->d_inode;
931 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
932 int last_reference = !!(new_inode && new_inode->i_nlink == 0);
933 int move = (old_dir != new_dir);
934 struct ubifs_inode *uninitialized_var(new_ui);
935
936 dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
937 old_dentry->d_name.len, old_dentry->d_name.name,
938 old_dir->i_ino, new_dentry->d_name.len,
939 new_dentry->d_name.name, new_dir->i_ino);
940 ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
941 ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
942 ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
943 ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
944
945 dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
946 dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
947 if (new_inode) {
948 new_ui = ubifs_inode(new_inode);
949 ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
950 ilen = UBIFS_INO_NODE_SZ;
951 if (!last_reference)
952 ilen += new_ui->data_len;
953 } else
954 ilen = 0;
955
956 aligned_dlen1 = ALIGN(dlen1, 8);
957 aligned_dlen2 = ALIGN(dlen2, 8);
958 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
959 if (old_dir != new_dir)
960 len += plen;
961 dent = kmalloc(len, GFP_NOFS);
962 if (!dent)
963 return -ENOMEM;
964
965
966 err = make_reservation(c, BASEHD, len);
967 if (err)
968 goto out_free;
969
970
971 dent->ch.node_type = UBIFS_DENT_NODE;
972 dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
973 dent->inum = cpu_to_le64(old_inode->i_ino);
974 dent->type = get_dent_type(old_inode->i_mode);
975 dent->nlen = cpu_to_le16(new_dentry->d_name.len);
976 memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
977 dent->name[new_dentry->d_name.len] = '\0';
978 zero_dent_node_unused(dent);
979 ubifs_prep_grp_node(c, dent, dlen1, 0);
980
981
982 dent2 = (void *)dent + aligned_dlen1;
983 dent2->ch.node_type = UBIFS_DENT_NODE;
984 dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
985 &old_dentry->d_name);
986 dent2->inum = 0;
987 dent2->type = DT_UNKNOWN;
988 dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
989 memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
990 dent2->name[old_dentry->d_name.len] = '\0';
991 zero_dent_node_unused(dent2);
992 ubifs_prep_grp_node(c, dent2, dlen2, 0);
993
994 p = (void *)dent2 + aligned_dlen2;
995 if (new_inode) {
996 pack_inode(c, p, new_inode, 0);
997 p += ALIGN(ilen, 8);
998 }
999
1000 if (!move)
1001 pack_inode(c, p, old_dir, 1);
1002 else {
1003 pack_inode(c, p, old_dir, 0);
1004 p += ALIGN(plen, 8);
1005 pack_inode(c, p, new_dir, 1);
1006 }
1007
1008 if (last_reference) {
1009 err = ubifs_add_orphan(c, new_inode->i_ino);
1010 if (err) {
1011 release_head(c, BASEHD);
1012 goto out_finish;
1013 }
1014 new_ui->del_cmtno = c->cmt_no;
1015 }
1016
1017 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
1018 if (err)
1019 goto out_release;
1020 if (!sync) {
1021 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1022
1023 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
1024 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
1025 if (new_inode)
1026 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1027 new_inode->i_ino);
1028 }
1029 release_head(c, BASEHD);
1030
1031 dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
1032 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
1033 if (err)
1034 goto out_ro;
1035
1036 err = ubifs_add_dirt(c, lnum, dlen2);
1037 if (err)
1038 goto out_ro;
1039
1040 dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
1041 err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
1042 if (err)
1043 goto out_ro;
1044
1045 offs += aligned_dlen1 + aligned_dlen2;
1046 if (new_inode) {
1047 ino_key_init(c, &key, new_inode->i_ino);
1048 err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
1049 if (err)
1050 goto out_ro;
1051 offs += ALIGN(ilen, 8);
1052 }
1053
1054 ino_key_init(c, &key, old_dir->i_ino);
1055 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1056 if (err)
1057 goto out_ro;
1058
1059 if (old_dir != new_dir) {
1060 offs += ALIGN(plen, 8);
1061 ino_key_init(c, &key, new_dir->i_ino);
1062 err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1063 if (err)
1064 goto out_ro;
1065 }
1066
1067 finish_reservation(c);
1068 if (new_inode) {
1069 mark_inode_clean(c, new_ui);
1070 spin_lock(&new_ui->ui_lock);
1071 new_ui->synced_i_size = new_ui->ui_size;
1072 spin_unlock(&new_ui->ui_lock);
1073 }
1074 mark_inode_clean(c, ubifs_inode(old_dir));
1075 if (move)
1076 mark_inode_clean(c, ubifs_inode(new_dir));
1077 kfree(dent);
1078 return 0;
1079
1080out_release:
1081 release_head(c, BASEHD);
1082out_ro:
1083 ubifs_ro_mode(c, err);
1084 if (last_reference)
1085 ubifs_delete_orphan(c, new_inode->i_ino);
1086out_finish:
1087 finish_reservation(c);
1088out_free:
1089 kfree(dent);
1090 return err;
1091}
1092
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * recomp_data_node - re-compress a truncated data node.
 * @dn: data node to re-compress
 * @new_len: new length (in bytes of uncompressed data); the total on-flash
 *           node length is returned here
 *
 * This function is used when an inode is truncated in the middle of a
 * compressed data node: the node is decompressed, cut to @*new_len bytes,
 * and compressed again.  Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int recomp_data_node(struct ubifs_data_node *dn, int *new_len)
{
	void *buf;
	int err, len, compr_type, out_len;

	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);
	err = ubifs_decompress(&dn->data, len, buf, &out_len, compr_type);
	if (err)
		goto out;

	/* Compress only the first @*new_len bytes back into the node */
	ubifs_compress(buf, *new_len, &dn->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
out:
	kfree(buf);
	return err;
}
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * This function writes the inode, a truncation node and, if the truncation
 * point falls in the middle of a data block, the (possibly re-compressed)
 * last data node to the journal, then updates the TNC and removes the
 * now-out-of-range data nodes.  Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
	ubifs_assert(S_ISREG(inode->i_mode));
	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

	/*
	 * One buffer holds the inode node, the truncation node and (at most)
	 * one worst-case data node.
	 */
	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* Get the last data block so it can be truncated */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			/* The block is a hole - nothing to write back */
			dlen = 0;
		else if (err)
			goto out_free;
		else {
			if (le32_to_cpu(dn->size) <= dlen)
				/* The block is already short enough */
				dlen = 0;
			else {
				int compr_type = le16_to_cpu(dn->compr_type);

				if (compr_type != UBIFS_COMPR_NONE) {
					err = recomp_data_node(dn, &dlen);
					if (err)
						goto out_free;
				} else {
					/* Uncompressed - just cut the size */
					dn->size = cpu_to_le32(dlen);
					dlen += UBIFS_DATA_NODE_SZ;
				}
				zero_data_node_unused(dn);
			}
		}
	}

	/* Must make reservation before allocating sequence numbers */
	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		/* The data node sits after the inode and truncation nodes */
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* The truncation node is not referenced from the TNC - it is dirt */
	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	/* Remove from the TNC all data nodes beyond the new size */
	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode (nlink must be 0)
 * @nm: extended attribute name
 *
 * This function writes a deletion xentry, the xattr inode and the host inode
 * to the journal as one group, removes the xentry and the whole xattr inode
 * subtree from the TNC, and re-adds the host inode.  Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode, const struct qstr *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
		host->i_ino, inode->i_ino, nm->name,
		ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since the xattr inode is being deleted, its data is not attached,
	 * hence the plain %UBIFS_INO_NODE_SZ for it below; the host inode
	 * keeps its data.
	 */
	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	/* A deletion xentry - inum is zero */
	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(nm->len);
	memcpy(xent->name, nm->name, nm->len);
	xent->name[nm->len] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from
	 * TNC. Extended attribute inodes are not orphaned, so the whole key
	 * range is removed right away.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1397 const struct inode *host)
1398{
1399 int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
1400 struct ubifs_inode *host_ui = ubifs_inode(host);
1401 struct ubifs_ino_node *ino;
1402 union ubifs_key key;
1403 int sync = IS_DIRSYNC(host);
1404
1405 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
1406 ubifs_assert(host->i_nlink > 0);
1407 ubifs_assert(inode->i_nlink > 0);
1408 ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
1409
1410 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1411 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1412 aligned_len1 = ALIGN(len1, 8);
1413 aligned_len = aligned_len1 + ALIGN(len2, 8);
1414
1415 ino = kmalloc(aligned_len, GFP_NOFS);
1416 if (!ino)
1417 return -ENOMEM;
1418
1419
1420 err = make_reservation(c, BASEHD, aligned_len);
1421 if (err)
1422 goto out_free;
1423
1424 pack_inode(c, ino, host, 0);
1425 pack_inode(c, (void *)ino + aligned_len1, inode, 1);
1426
1427 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
1428 if (!sync && !err) {
1429 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1430
1431 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
1432 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
1433 }
1434 release_head(c, BASEHD);
1435 if (err)
1436 goto out_ro;
1437
1438 ino_key_init(c, &key, host->i_ino);
1439 err = ubifs_tnc_add(c, &key, lnum, offs, len1);
1440 if (err)
1441 goto out_ro;
1442
1443 ino_key_init(c, &key, inode->i_ino);
1444 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
1445 if (err)
1446 goto out_ro;
1447
1448 finish_reservation(c);
1449 spin_lock(&host_ui->ui_lock);
1450 host_ui->synced_i_size = host_ui->ui_size;
1451 spin_unlock(&host_ui->ui_lock);
1452 mark_inode_clean(c, host_ui);
1453 kfree(ino);
1454 return 0;
1455
1456out_ro:
1457 ubifs_ro_mode(c, err);
1458 finish_reservation(c);
1459out_free:
1460 kfree(ino);
1461 return err;
1462}
1463
1464