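/*
 * This file implements the UBIFS I/O subsystem: thin wrappers around the UBI
 * LEB read/write/map primitives, on-media node checking and padding helpers,
 * and the journal head write-buffer (wbuf) machinery used to coalesce small
 * writes into min. I/O unit sized chunks.
 */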
#ifndef __UBOOT__
#include <init.h>
#include <log.h>
#include <dm/devres.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <u-boot/crc.h>
#else
#include <linux/compat.h>
#include <linux/err.h>
#endif
#include "ubifs.h"
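
/**
 * ubifs_ro_mode - switch UBIFS to read only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */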
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= MS_RDONLY;
		ubifs_warn(c, "switched to read-only mode, error %d", err);
		dump_stack();
	}
}
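
/*
 * Below are simple wrappers over UBI I/O functions which include some
 * additional checks and UBIFS debugging stuff. See the corresponding UBI
 * function for more information.
 */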
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if the
	 * @even_ebadmsg flag is set.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len)
{
	int err = 0;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
#ifndef __UBOOT__
	else
		err = dbg_leb_write(c, lnum, buf, offs, len);
#endif
	if (err) {
		ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
	int err = 0;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len);
#ifndef __UBOOT__
	else
		err = dbg_leb_change(c, lnum, buf, len);
#endif
	if (err) {
		ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err = 0;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
#ifndef __UBOOT__
	else
		err = dbg_leb_unmap(c, lnum);
#endif
	if (err) {
		ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
	int err = 0;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum);
#ifndef __UBOOT__
	else
		err = dbg_leb_map(c, lnum);
#endif
	if (err) {
		ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dump_stack();
	}
	return err;
}
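
/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number, node type, node length and,
 * unless data node CRC checking is disabled, the CRC checksum. It returns
 * zero in case of success, %-EUCLEAN in case of bad magic or CRC, and
 * %-EINVAL for other inconsistencies.
 */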
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err(c, "bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err(c, "bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err(c, "bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return err;
}
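
/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * If the wasted space is large enough, a padding node of %UBIFS_PAD_NODE_SZ
 * bytes followed by a zeroed tail is written; otherwise the space is simply
 * filled with %UBIFS_PADDING_BYTE bytes.
 */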
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}
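
/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */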
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn(c, "running out of sequence numbers, end of life soon");
	}

	return sqnum;
}
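
/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates the node CRC, fills the common header, and adds proper padding
 * up to the next minimum I/O unit if @pad is not zero.
 */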
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}
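
/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates the node CRC and fills the common header.
 */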
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

#ifndef __UBOOT__
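
/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */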
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}
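
/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */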
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	ubifs_assert(!hrtimer_active(&wbuf->timer));

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
		       USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
			       HRTIMER_MODE_REL);
}
#endif
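
/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */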
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
#ifndef __UBOOT__
	hrtimer_cancel(&wbuf->timer);
#endif
}
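
/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on a @c->min_io_size boundary) is synchronized.
 */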
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer but only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Adjust the write-buffer size so that it does not cross a
	 * @c->max_write_size boundary: if less than @c->max_write_size is
	 * left in this LEB, shrink the buffer to what is left; if the new
	 * offset is not @c->max_write_size aligned, make the buffer end at
	 * the next @c->max_write_size boundary; otherwise use the full
	 * @c->max_write_size.
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
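
/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */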
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(offs >= 0 && offs <= c->leb_size);
	ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(lnum != wbuf->lnum);
	ubifs_assert(wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);

	return 0;
}

#ifndef __UBOOT__
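
/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize
 * write-buffers. Returns zero in case of success and a negative error code in
 * case of failure.
 */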
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err(c, "cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}
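
/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node will not reach the flash media immediately if it
 * does not take a whole max. write unit (@c->max_write_size). Instead, the
 * node sits in RAM until the write-buffer is synchronized (e.g., by timer, or
 * because the write-buffer is full or a "sync" operation is done).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */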
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within the
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node does not fit into the remaining write-buffer
		 * space. Fill the write-buffer with the beginning of the node
		 * and flush it, so the buffer becomes empty again.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure
		 * the following writes are done in optimal
		 * @c->max_write_size chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may not fit into the write-buffer, so write as
	 * many whole @c->max_write_size chunks as possible directly to the
	 * media and keep only the tail for the write-buffer.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * What is left does not take a whole max. write unit, so copy
		 * it to the write-buffer and we are done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}
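
/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function automatically fills the node magic number, assigns a sequence
 * number, and calculates the node CRC checksum. The length of the @buf buffer
 * has to be aligned to the minimal I/O unit size. This function automatically
 * appends a padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */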
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	ubifs_prepare_node(c, buf, len, 1);
	err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
	if (err)
		ubifs_dump_node(c, buf);

	return err;
}
#endif
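
/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the buffer, otherwise it reads the flash
 * media. Returns zero in case of success, %-EUCLEAN if CRC mismatched and a
 * negative error code in case of failure.
 */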
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before the write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err(c, "bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err(c, "expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err(c, "bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}
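
/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */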
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_errc(c, "bad node type (%d but expected %d)",
			   ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_errc(c, "expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_errc(c, "bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
		   offs, ubi_is_mapped(c->ubi, lnum));
	if (!c->probing) {
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return -EINVAL;
}
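
/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes the write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */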
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

	size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	/*
	 * If the LEB starts at a max. write size aligned address, the
	 * write-buffer size has to be set to @c->max_write_size. Otherwise,
	 * it has to be less, so that it ends at the closest max. write size
	 * boundary.
	 */
	size = c->max_write_size - (c->leb_start % c->max_write_size);
	wbuf->avail = wbuf->size = size;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

#ifndef __UBOOT__
	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
	wbuf->delta *= 1000000000ULL;
	ubifs_assert(wbuf->delta <= ULONG_MAX);
#endif
	return 0;
}
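
/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */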
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* NOR flash or something similar */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}
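
/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */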
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}
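
/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */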
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * head contains something related to this inode, it is
			 * a _copy_ of the corresponding on-flash node which
			 * sits somewhere else.
			 */
			continue;

		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}