/*
 * This file is part of UBIFS.
 *
 * This file implements the UBIFS I/O subsystem: helper functions for
 * reading, writing, checking and validating nodes, plus write-buffering
 * support. Write-buffers accumulate small node writes and are flushed
 * when they fill up or when a timer expires, so space is not wasted on
 * padding every write out to the minimal I/O unit boundary.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"
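
/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */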
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= MS_RDONLY;
		ubifs_warn("switched to read-only mode, error %d", err);
		dump_stack();
	}
}
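
/*
 * The functions below are thin wrappers over the corresponding UBI volume
 * read/write/change/unmap/map operations. They print an error message and
 * dump the stack on failure, and the wrappers which modify the media also
 * switch UBIFS to read-only mode via 'ubifs_ro_mode()'.
 */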
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if the
	 * @even_ebadmsg is true.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len);
	if (err) {
		ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len);
	else
		err = dbg_leb_change(c, lnum, buf, len);
	if (err) {
		ubifs_err("changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
	else
		err = dbg_leb_unmap(c, lnum);
	if (err) {
		ubifs_err("unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum);
	else
		err = dbg_leb_map(c, lnum);
	if (err) {
		ubifs_err("mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dump_stack();
	}
	return err;
}
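
/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number and CRC checksum. It also
 * validates the node length against the per-type length ranges, so that a
 * corrupted or malicious file-system image cannot make UBIFS read outside
 * of the allocated buffer, e.g., via a too-large length in the common
 * header.
 *
 * CRC checking of data nodes may be skipped when @c->no_chk_data_crc is set
 * (controlled by the corresponding mount option). However, if @must_chk_crc
 * is true, or the file-system is being mounted or re-mounted R/W
 * (@c->mounting or @c->remounting_rw), the CRC is always checked, because
 * journal nodes read at that time may potentially be corrupted.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of
 * bad CRC or magic.
 */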
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err("bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err("bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err("bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err("bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err("bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return err;
}
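
/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of the minimal I/O
 * unit, so shorter writes have to be padded. If at least %UBIFS_PAD_NODE_SZ
 * bytes are available, a padding node is written first and the rest is
 * zeroed out; otherwise the space is filled with %UBIFS_PADDING_BYTE
 * directly. Both forms are recognized and skipped when the media is
 * scanned.
 */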
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}
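
/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */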
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err("sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn("running out of sequence numbers, end of life soon");
	}

	return sqnum;
}
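
/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * fills the common header, assigns a sequence number, calculates the node
 * CRC, and pads the buffer up to the next minimal I/O unit boundary if
 * @pad is not zero.
 */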
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}
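
/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * fills the common header, assigns a sequence number, and calculates the
 * node CRC.
 */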
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}
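
/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires. It marks the
 * write-buffer as needing synchronization and wakes up the background
 * thread.
 */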
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}
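
/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */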
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	ubifs_assert(!hrtimer_active(&wbuf->timer));

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
		       USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
			       HRTIMER_MODE_REL);
}
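
/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */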
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}
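
/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function
 * does not necessarily write all @c->max_write_size bytes to the flash.
 * Instead, if the write-buffer is only partially filled, only the used part
 * (aligned on a @c->min_io_size boundary) is written out, which wastes less
 * space.
 */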
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer, but only the minimally
	 * necessary amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Pick the new write-buffer size so that it ends at the closest
	 * @c->max_write_size boundary, or at the end of the LEB if that is
	 * closer. This keeps subsequent writes aligned to max. write size
	 * chunks.
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
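
/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */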
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(offs >= 0 && offs <= c->leb_size);
	ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(lnum != wbuf->lnum);
	ubifs_assert(wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);

	return 0;
}
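
/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize
 * write-buffers. Returns zero in case of success and a negative error code
 * in case of failure.
 */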
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err("cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}
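
/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means
 * that the last piece of the node won't reach the flash media immediately
 * if it does not take a whole max. write unit (@c->max_write_size).
 * Instead, the node will sit in RAM until the write-buffer is synchronized
 * (e.g., by the timer, or because the buffer fills up).
 *
 * This function returns zero in case of success and a negative error code
 * in case of failure. If the node cannot be written because there is no
 * more room in this logical eraseblock, %-ENOSPC is returned.
 */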
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within the
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node is large enough and does not fit entirely within
		 * the currently available space. Fill and flush the
		 * write-buffer and switch to the next max. write unit.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure
		 * the following writes are done in optimal
		 * @c->max_write_size chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may take more space than the free space in the
	 * write-buffer. Write as much as possible in one go, in
	 * @c->max_write_size chunks.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * And now we have what's left and what does not take a whole
		 * max. write unit, so write it to the write-buffer and we
		 * are done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}
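
/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function fills the node magic number, assigns a sequence number,
 * and calculates the node CRC checksum. The @buf buffer has to be large
 * enough for @len aligned up to the minimal I/O unit size; padding is
 * appended if needed. Returns zero in case of success and a negative error
 * code in case of failure.
 */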
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	ubifs_prepare_node(c, buf, len, 1);
	err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
	if (err)
		ubifs_dump_node(c, buf);

	return err;
}
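
/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and
 * stores it in @buf. If the node partially or fully sits in the
 * write-buffer, this function takes data from the buffer, otherwise it
 * reads the flash media. Returns zero in case of success, %-EUCLEAN if CRC
 * mismatched and a negative error code in case of failure.
 */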
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before the write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err("bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}
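
/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and
 * stores it in @buf. Returns zero in case of success, %-EUCLEAN if CRC
 * mismatched and a negative error code in case of failure.
 */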
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_err("bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
		  ubi_is_mapped(c->ubi, lnum));
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}
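
/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */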
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

	size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	/*
	 * If the LEB starts at the max. write size aligned address, then
	 * write-buffer size has to be set to @c->max_write_size. Otherwise,
	 * write-buffer size has to be aligned to the closest max. write size
	 * boundary.
	 */
	size = c->max_write_size - (c->leb_start % c->max_write_size);
	wbuf->avail = wbuf->size = size;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
	wbuf->delta *= 1000000000ULL;
	ubifs_assert(wbuf->delta <= ULONG_MAX);
	return 0;
}
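
/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 */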
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* NOR flash or something similar */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}
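
/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns with %1 if the write-buffer contains some data from
 * the given inode, otherwise it returns with %0.
 */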
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}
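
/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */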
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * head contains something related to this inode, it
			 * is a _copy_ of the corresponding on-flash node
			 * which sits somewhere else.
			 */
			continue;

		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}