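/*
 * This file is part of UBIFS.
 *
 * This file implements functions that manage the running of the commit
 * process. Each affected module has its own functions to accomplish their
 * part in the commit and those functions are called here.
 *
 * The commit is the process whereby all updates to the index and LEB
 * properties are written out together and the journal becomes empty. This
 * keeps the file-system consistent - at all times the state can be recreated
 * by reading the index and LEB properties and then replaying the journal.
 */
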
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "ubifs.h"
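
/**
 * nothing_to_commit - check if there is nothing to commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which checks if there is anything to commit. It
 * is used as an optimization to avoid starting the commit if it is not really
 * necessary, because the commit operation always assumes flash I/O (e.g., one
 * of the first things it does is to write the master node), which may be
 * quite expensive.
 *
 * Returns %1 if there is nothing to commit and %0 otherwise.
 */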
static int nothing_to_commit(struct ubifs_info *c)
{
	/*
	 * During mounting or remounting from R/O mode to R/W mode we may
	 * commit for various recovery-related reasons.
	 */
	if (c->mounting || c->remounting_rw)
		return 0;

	/*
	 * If the root TNC node is dirty, we definitely have something to
	 * commit.
	 */
	if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
		return 0;

	/*
	 * Even though the TNC is clean, the LPT tree may have dirty nodes. For
	 * example, this may happen if the budgeting subsystem invoked GC to
	 * make some free space, and the GC found an LEB with only dirty and
	 * free space. In this case GC would just change the lprops of this
	 * LEB (by turning all space into free space) and unmap it.
	 */
	if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
		return 0;

	ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
	ubifs_assert(c->dirty_pn_cnt == 0);
	ubifs_assert(c->dirty_nn_cnt == 0);

	return 1;
}
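
/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit. It has to be called with the commit
 * semaphore held for writing.
 */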
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	if (nothing_to_commit(c)) {
		up_write(&c->commit_sem);
		err = 0;
		goto out_cancel;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	ubifs_get_lp_stats(c, &lst);

	up_write(&c->commit_sem);

	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;

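	/*
	 * Update and write the master node. The master node records where the
	 * new index and LPT roots are, so once it is written the new commit
	 * state is what will be recovered on the next mount.
	 */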
	mutex_lock(&c->mst_mutex);
	c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs = cpu_to_le32(zroot.offs);
	c->mst_node->root_len = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size = cpu_to_le64(c->bi.old_idx_sz);
	c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
	err = ubifs_write_master(c);
	mutex_unlock(&c->mst_mutex);
	if (err)
		goto out;

	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

out_cancel:
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);
	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	ubifs_err("commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}
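
/**
 * run_bg_commit - run background commit if it is needed.
 * @c: UBIFS file-system description object
 *
 * This function runs background commit if it is needed. Returns zero in case
 * of success and a negative error code in case of failure.
 */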
static int run_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	/*
	 * Run background commit only if background commit was requested or if
	 * commit is required.
	 */
	if (c->cmt_state != COMMIT_BACKGROUND &&
	    c->cmt_state != COMMIT_REQUIRED)
		goto out;
	spin_unlock(&c->cs_lock);

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_REQUIRED)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
	else if (c->cmt_state == COMMIT_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_BACKGROUND;
	else
		goto out_cmt_unlock;
	spin_unlock(&c->cs_lock);

	return do_commit(c);

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return 0;
}
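
/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements the file-system background activities: it
 * synchronizes write-buffers when needed and runs an in-advance commit when
 * the journal is getting full.
 */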
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	ubifs_msg("background thread \"%s\" started, PID %d",
		  c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Check if there is something to do */
		if (!c->need_bgt) {
			/*
			 * Re-check for a pending stop request before going to
			 * sleep, otherwise this thread could sleep forever and
			 * block a task waiting in 'kthread_stop()'.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	ubifs_msg("background thread \"%s\" stops", c->bgt_name);
	return 0;
}
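
/**
 * ubifs_commit_required - set commit state to "required".
 * @c: UBIFS file-system description object
 *
 * This function is called if a commit is required but cannot be done from the
 * calling function, so it is just flagged instead.
 */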
void ubifs_commit_required(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	switch (c->cmt_state) {
	case COMMIT_RESTING:
	case COMMIT_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_REQUIRED));
		c->cmt_state = COMMIT_REQUIRED;
		break;
	case COMMIT_RUNNING_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_RUNNING_REQUIRED));
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
		break;
	case COMMIT_REQUIRED:
	case COMMIT_RUNNING_REQUIRED:
	case COMMIT_BROKEN:
		break;
	}
	spin_unlock(&c->cs_lock);
}
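
/**
 * ubifs_request_bg_commit - notify the background thread to do a commit.
 * @c: UBIFS file-system description object
 *
 * This function is called if the journal is full enough to make a commit
 * worthwhile, so the background thread is kicked to start it.
 */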
void ubifs_request_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_RESTING) {
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_BACKGROUND));
		c->cmt_state = COMMIT_BACKGROUND;
		spin_unlock(&c->cs_lock);
		ubifs_wake_up_bgt(c);
	} else
		spin_unlock(&c->cs_lock);
}
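
/**
 * wait_for_commit - wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function sleeps while the commit operation is running.
 */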
static int wait_for_commit(struct ubifs_info *c)
{
	dbg_cmt("pid %d goes to sleep", current->pid);

	/*
	 * The following sleeps if the condition is false, and will be woken
	 * when the commit ends. It is possible, although very unlikely, that
	 * we will wake up and see the subsequent commit running, rather than
	 * the one we were waiting for, and go back to sleep. However, we will
	 * be woken again, so there is no danger of sleeping forever.
	 */
	wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
			      c->cmt_state != COMMIT_RUNNING_REQUIRED);
	dbg_cmt("commit finished, pid %d woke up", current->pid);
	return 0;
}
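
/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a
 * negative error code in case of failure.
 */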
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * We set the commit state to 'running required' to indicate
		 * that we want it to complete as quickly as possible.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Since we unlocked 'c->cs_lock', the state may have changed, so
	 * re-check it.
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}
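
/**
 * ubifs_gc_should_commit - determine if it is time for GC to run commit.
 * @c: UBIFS file-system description object
 *
 * This function is called by garbage collection to determine if commit should
 * be run. If commit state is @COMMIT_BACKGROUND, which means that the journal
 * is full enough to start commit, this function promotes the state to
 * @COMMIT_REQUIRED and returns %1. Otherwise it returns %0.
 */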
int ubifs_gc_should_commit(struct ubifs_info *c)
{
	int ret = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BACKGROUND) {
		dbg_cmt("commit required now");
		c->cmt_state = COMMIT_REQUIRED;
	} else
		dbg_cmt("commit not requested");
	if (c->cmt_state == COMMIT_REQUIRED)
		ret = 1;
	spin_unlock(&c->cs_lock);
	return ret;
}
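
/*
 * Everything below is related to debugging.
 */

/**
 * struct idx_node - hold index nodes during index tree traversal.
 * @list: list
 * @iip: index in parent (slot number of this indexing node in the parent
 *       indexing node)
 * @upper_key: all keys in this indexing node have to be less or equivalent to
 *             this key
 * @idx: index node (8-byte aligned because all node structures must be 8-byte
 *       aligned)
 */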
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __aligned(8);
};
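
/**
 * dbg_old_index_check_init - get information for the next old index check.
 * @c: UBIFS file-system description object
 * @zroot: root of the index
 *
 * This function records information about the index that will be needed for
 * the next old index check i.e. 'dbg_check_old_index()'.
 *
 * This function returns %0 on success and a negative error code on failure.
 */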
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	struct ubifs_idx_node *idx;
	int lnum, offs, len, err = 0;
	struct ubifs_debug_info *d = c->dbg;

	d->old_zroot = *zroot;
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;

	idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
	if (!idx)
		return -ENOMEM;

	err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
	if (err)
		goto out;

	d->old_zroot_level = le16_to_cpu(idx->level);
	d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
out:
	kfree(idx);
	return err;
}
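
/**
 * dbg_check_old_index - check the old copy of the index.
 * @c: UBIFS file-system description object
 * @zroot: root of the new index
 *
 * In order to be able to recover from an unclean unmount, a complete copy of
 * the index must exist on flash. This is the "old" index. The commit process
 * must write the "new" index to flash without overwriting or destroying any
 * part of the old index. This function is run at commit end in order to check
 * that the old index does indeed exist completely intact.
 *
 * This function returns %0 on success and a negative error code on failure.
 */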
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
	int first = 1, iip;
	struct ubifs_debug_info *d = c->dbg;
	union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
	unsigned long long uninitialized_var(last_sqnum);
	struct ubifs_idx_node *idx;
	struct list_head list;
	struct idx_node *i;
	size_t sz;

	if (!dbg_is_chk_index(c))
		return 0;

	INIT_LIST_HEAD(&list);

	sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
	     UBIFS_IDX_NODE_SZ;

	/* Start at the old zroot */
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;
	iip = 0;

	/*
	 * Traverse the index tree preorder depth-first i.e. do a node and then
	 * its subtrees from left to right.
	 */
	while (1) {
		struct ubifs_branch *br;

		/* Get the next index node */
		i = kmalloc(sz, GFP_NOFS);
		if (!i) {
			err = -ENOMEM;
			goto out_free;
		}
		i->iip = iip;
		/* Keep the index nodes on our path in a list */
		list_add_tail(&i->list, &list);
		/* Read the index node */
		idx = &i->idx;
		err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
		if (err)
			goto out_free;
		/* Validate the number of children */
		child_cnt = le16_to_cpu(idx->child_cnt);
		if (child_cnt < 1 || child_cnt > c->fanout) {
			err = 1;
			goto out_dump;
		}
		if (first) {
			first = 0;
			/* Check root level and sqnum */
			if (le16_to_cpu(idx->level) != d->old_zroot_level) {
				err = 2;
				goto out_dump;
			}
			if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
				err = 3;
				goto out_dump;
			}
			/* Set last values as though root had a parent */
			last_level = le16_to_cpu(idx->level) + 1;
			last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
			key_read(c, ubifs_idx_key(c, idx), &lower_key);
			highest_ino_key(c, &upper_key, INUM_WATERMARK);
		}
		key_copy(c, &upper_key, &i->upper_key);
		if (le16_to_cpu(idx->level) != last_level - 1) {
			err = 3;
			goto out_dump;
		}
		/*
		 * The index is always written bottom up hence a child's sqnum
		 * is always less than the parent's sqnum.
		 */
		if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
			err = 4;
			goto out_dump;
		}
		/* Check key range */
		key_read(c, ubifs_idx_key(c, idx), &l_key);
		br = ubifs_idx_branch(c, idx, child_cnt - 1);
		key_read(c, &br->key, &u_key);
		if (keys_cmp(c, &lower_key, &l_key) > 0) {
			err = 5;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) < 0) {
			err = 6;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) == 0)
			if (!is_hash_key(c, &u_key)) {
				err = 7;
				goto out_dump;
			}

		if (le16_to_cpu(idx->level) == 0) {
			/* Leaf level reached - go back up the path */
			while (1) {
				/* Drop the current index node */
				list_del(&i->list);
				kfree(i);
				/* No more index nodes on the path - done */
				if (list_empty(&list))
					goto out;
				/* Look at the parent again */
				i = list_entry(list.prev, struct idx_node,
					       list);
				idx = &i->idx;
				/* Is there a next child to go right to? */
				if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
					iip = iip + 1;
					break;
				} else
					/* No, so go up another level */
					iip = i->iip;
			}
		} else
			/* Go down left */
			iip = 0;
		/*
		 * We have the parent in 'idx' and now we set up for reading
		 * the child pointed to by slot 'iip'.
		 */
		last_level = le16_to_cpu(idx->level);
		last_sqnum = le64_to_cpu(idx->ch.sqnum);
		br = ubifs_idx_branch(c, idx, iip);
		lnum = le32_to_cpu(br->lnum);
		offs = le32_to_cpu(br->offs);
		len = le32_to_cpu(br->len);
		key_read(c, &br->key, &lower_key);
		if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
			br = ubifs_idx_branch(c, idx, iip + 1);
			key_read(c, &br->key, &upper_key);
		} else
			key_copy(c, &i->upper_key, &upper_key);
	}
out:
	err = dbg_old_index_check_init(c, zroot);
	if (err)
		goto out_free;

	return 0;

out_dump:
	ubifs_err("dumping index node (iip=%d)", i->iip);
	ubifs_dump_node(c, idx);
	list_del(&i->list);
	kfree(i);
	if (!list_empty(&list)) {
		i = list_entry(list.prev, struct idx_node, list);
		ubifs_err("dumping parent index node");
		ubifs_dump_node(c, &i->idx);
	}
out_free:
	while (!list_empty(&list)) {
		i = list_entry(list.next, struct idx_node, list);
		list_del(&i->list);
		kfree(i);
	}
	ubifs_err("failed, error %d", err);
	if (err > 0)
		err = -EINVAL;
	return err;
}