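/*
 * This file implements the UBIFS journal.
 *
 * The original file-level comment was lost in extraction; as a brief,
 * reconstructed summary: journal operations write groups of nodes (inode,
 * directory entry, data and truncation nodes) through the journal heads. Each
 * journal head has a write-buffer, and space for a head is taken from "bud"
 * LEBs which are accounted in the log via 'ubifs_add_bud_to_log()'. The
 * helpers below reserve space in a head, write node groups to it, and then
 * update the TNC (the index) with the new on-flash positions.
 */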
#include "ubifs.h"
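/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode node to zero out
 */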
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}
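/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 *                         entry node.
 * @dent: the directory entry node to zero out
 */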
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
	memset(dent->padding2, 0, 4);
}
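/**
 * zero_data_node_unused - zero out unused fields of an on-flash data node.
 * @data: the data node to zero out
 */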
static inline void zero_data_node_unused(struct ubifs_data_node *data)
{
	memset(data->padding, 0, 2);
}
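/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 *                         node.
 * @trun: the truncation node to zero out
 */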
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}
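/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: how many bytes to reserve
 *
 * This function reserves space in journal head @jhead. If the reservation
 * succeeded, the journal head stays locked and has to be released later with
 * 'release_head()'. Returns zero in case of success, %-EAGAIN if a commit is
 * required, and other negative error codes in case of other failures.
 */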
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * The base head typically gets smaller nodes, so prefer squeezing
	 * them into smaller chunks of free space ('squeeze' is passed to
	 * 'ubifs_find_free_space()').
	 */
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_media) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * The write-buffer has not been seek'ed, or there is not enough space
	 * in the current bud - look for an LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0) {
		/* Found an LEB, add it to the journal head as a bud */
		err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
		if (err)
			goto out_return;
		goto out;
	}

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space - run the garbage collector to make some. The
	 * write-buffer mutex has to be unlocked because GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may have
		 * allocated a new bud for this journal head while
		 * @wbuf->io_mutex was dropped, so try once again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else switched the journal head while the mutex was
		 * dropped and there is enough space now, so return the LEB we
		 * got from GC.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	err = ubifs_add_bud_to_log(c, jhead, lnum, 0);
	if (err)
		goto out_return;
	offs = 0;

out:
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Returning the LEB failed and the original error was only
		 * -EAGAIN (commit required), so report the more serious
		 * 'ubifs_return_leb()' error instead.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
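/**
 * write_node - write a node to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @node: node to write
 * @len: node length
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 *
 * This function writes a node to reserved space of journal head @jhead.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */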
static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
		      int *lnum, int *offs)
{
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;

	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);
	ubifs_prepare_node(c, node, len, 0);

	return ubifs_wbuf_write_nolock(wbuf, node, len);
}
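/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function is similar to 'write_node()' but it does not assume the
 * buffer is a single node, so it does not prepare it (the caller is expected
 * to have prepared the node group already).
 */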
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	ubifs_assert(jhead != GCHD);

	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}
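/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes a space reservation in journal head @jhead. It takes
 * the commit lock and locks the journal head; the caller has to release the
 * head with 'release_head()' and finish the reservation with
 * 'finish_reservation()'. Returns zero in case of success and a negative
 * error code in case of failure.
 */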
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * Neither the free-space search nor GC could find space.
		 * Committing may free some, so turn the error into -EAGAIN
		 * and let the code below commit and re-try (at most twice).
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit - do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err("stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn("too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err("cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* This looks like a budgeting problem - dump useful information */
		down_write(&c->commit_sem);
		spin_lock(&c->space_lock);
		dbg_dump_stack();
		dbg_dump_budg(c);
		spin_unlock(&c->space_lock);
		dbg_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
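/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * 'make_reservation()'. It has to be called after each successful
 * 'make_reservation()'.
 */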
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
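/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes a journal reservation. It must be called after each
 * 'make_reservation()'.
 */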
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
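/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */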
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	return 0;
}
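/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */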
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(inode->i_uid);
	ino->gid = cpu_to_le32(inode->i_gid);
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Do not attach the inode data if this is a deletion inode - the data
	 * is not needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
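/**
 * mark_inode_clean - mark UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper marks UBIFS inode @ui as clean by clearing the @ui->dirty flag
 * and releasing its budget.
 */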
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}
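/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal, and then updates the TNC. Returns zero in case
 * of success and a negative error code in case of failure.
 */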
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct qstr *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *dir_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(dir_ui->data_len == 0);
	ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, there is no
	 * need to attach and write the inode data - the inode is going away.
	 * There is also no need to synchronize the write-buffer for it, even
	 * if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);
	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(nm->len);
	memcpy(dent->name, nm->name, nm->len);
	dent->name[nm->len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, the inode is not removed from TNC here, even if the last
	 * reference to it has just been deleted, because the inode may still
	 * be opened. It has been added to the orphan list instead, and the
	 * orphan subsystem takes further care of it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, dir_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
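/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal (to the data head) and
 * updates the TNC. Returns zero in case of success and a negative error code
 * in case of failure.
 */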
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len;
	int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_jnl("ino %lu, blk %u, len %d, key %s",
		(unsigned long)key_inum(c, key), key_block(c, key), len,
		DBGKEY(key));
	ubifs_assert(len <= UBIFS_BLOCK_SIZE);

	data = kmalloc(dlen, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);
	zero_data_node_unused(data);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(buf, len, &data->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);

	dlen = UBIFS_DATA_NODE_SZ + out_len;
	data->compr_type = cpu_to_le16(compr_type);

	err = make_reservation(c, DATAHD, dlen);
	if (err)
		goto out_free;

	err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
	if (err)
		goto out_release;
	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(data);
	return err;
}
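/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
 * success and a negative error code in case of failure.
 */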
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data and
	 * do not synchronize the write-buffer for it.
	 */
	if (!last_reference) {
		len += ui->data_len;
		sync = IS_SYNC(inode);
	}
	ino = kmalloc(len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 1);
	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, len);
	} else {
		union ubifs_key key;

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, len);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
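/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode, which includes removing it from the
 * orphan list, deleting it from TNC and, in some cases, writing one more
 * deletion inode to the journal.
 *
 * When an inode is unlinked, 'ubifs_jnl_update()' writes a deletion inode and
 * directory entry to the media and adds the inode to the orphan list. Later,
 * when the last reference is dropped, this function is called. If a commit
 * happened in between (detected via @ui->del_cmtno), the deletion inode is no
 * longer guaranteed to be in the journal, so another deletion inode has to be
 * written; otherwise it is enough to remove the inode from TNC and from the
 * orphan list.
 */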
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check the commit number again, because the first check was done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
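/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of directory entry to rename
 * @old_dentry: directory entry to rename
 * @new_dir: parent inode of new directory entry
 * @new_dentry: new directory entry (or directory entry to replace)
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation, which may involve writing
 * up to 3 inodes and 2 directory entries to the journal. It marks the written
 * inodes as clean and returns zero in case of success; in case of failure, a
 * negative error code is returned.
 */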
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct dentry *old_dentry,
		     const struct inode *new_dir,
		     const struct dentry *new_dentry, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	const struct inode *old_inode = old_dentry->d_inode;
	const struct inode *new_inode = new_dentry->d_inode;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
		old_dentry->d_name.len, old_dentry->d_name.name,
		old_dir->i_ino, new_dentry->d_name.len,
		new_dentry->d_name.name, new_dir->i_ino);
	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (old_dir != new_dir)
		len += plen;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(new_dentry->d_name.len);
	memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
	dent->name[new_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	/* Make deletion dent */
	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
			    &old_dentry->d_name);
	dent2->inum = 0;
	dent2->type = DT_UNKNOWN;
	dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
	memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
	dent2->name[old_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
	if (err)
		goto out_ro;

	err = ubifs_add_dirt(c, lnum, dlen2);
	if (err)
		goto out_ro;

	dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
	err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
	if (err)
		goto out_ro;

	offs += aligned_dlen1 + aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (old_dir != new_dir) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}
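/**
 * recomp_data_node - re-compress a truncated data node.
 * @dn: data node to re-compress
 * @new_len: new length
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed to the new, shorter length.
 */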
static int recomp_data_node(struct ubifs_data_node *dn, int *new_len)
{
	void *buf;
	int err, len, compr_type, out_len;

	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);
	err = ubifs_decompress(&dn->data, len, buf, &out_len, compr_type);
	if (err)
		goto out;

	ubifs_compress(buf, *new_len, &dn->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
out:
	kfree(buf);
	return err;
}
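/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * When the size of a file decreases due to truncation, a truncation node is
 * written, the TNC is updated, and the last data block is re-written if it
 * has been affected. The inode is also written in order to synchronize the
 * new inode size.
 *
 * This function marks the inode as clean and returns zero in case of success;
 * in case of failure, a negative error code is returned.
 */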
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
	ubifs_assert(S_ISREG(inode->i_mode));
	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* Get the last data block so it can be truncated */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnl("last block key %s", DBGKEY(&key));
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			dlen = 0; /* Not found, nothing to re-write */
		else if (err)
			goto out_free;
		else {
			if (le32_to_cpu(dn->size) <= dlen)
				dlen = 0; /* Nothing to do */
			else {
				int compr_type = le16_to_cpu(dn->compr_type);

				if (compr_type != UBIFS_COMPR_NONE) {
					err = recomp_data_node(dn, &dlen);
					if (err)
						goto out_free;
				} else {
					dn->size = cpu_to_le32(dlen);
					dlen += UBIFS_DATA_NODE_SZ;
				}
				zero_data_node_unused(dn);
			}
		}
	}

	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}

#ifdef CONFIG_UBIFS_FS_XATTR
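/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute name
 *
 * This function deletes an extended attribute, which is very similar to
 * un-linking a regular file - it writes a deletion xentry and a deletion
 * inode, writes the updated host inode, and updates the TNC. Returns zero in
 * case of success and a negative error code in case of failure.
 */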
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode, const struct qstr *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
		host->i_ino, inode->i_ino, nm->name,
		ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since the extended attribute inode is being deleted, no data is
	 * attached to it, so its on-flash length is %UBIFS_INO_NODE_SZ.
	 */
	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(nm->len);
	memcpy(xent->name, nm->name, nm->len);
	xent->name[nm->len] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from
	 * TNC. Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
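/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head) and updates the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */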
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(host->i_nlink > 0);
	ubifs_assert(inode->i_nlink > 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	ino = kmalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}

#endif /* CONFIG_UBIFS_FS_XATTR */