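/*
 * UBIFS I/O subsystem.
 *
 * This file provides the low-level I/O helpers used by the rest of UBIFS:
 * thin wrappers around the UBI LEB operations, node checking and preparation
 * (magic, length and CRC validation, common header fill-in), and the
 * write-buffer (wbuf) implementation used by the journal heads to coalesce
 * small writes into min. I/O unit sized writes.
 */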
#ifndef __UBOOT__
#include <linux/crc32.h>
#include <linux/slab.h>
#else
#include <linux/compat.h>
#include <linux/err.h>
#endif
#include "ubifs.h"

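/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason for switching to R/O mode
 */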
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
        if (!c->ro_error) {
                c->ro_error = 1;
                c->no_chk_data_crc = 0;
                c->vfs_sb->s_flags |= MS_RDONLY;
                ubifs_warn(c, "switched to read-only mode, error %d", err);
                dump_stack();
        }
}

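/*
 * Below are simple wrappers over the corresponding UBI I/O functions which
 * validate the arguments and report errors. The write-style wrappers
 * additionally switch the file-system to read-only mode on failure.
 */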
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
                   int len, int even_ebadmsg)
{
        int err;

        err = ubi_read(c->ubi, lnum, buf, offs, len);
        /*
         * In case of %-EBADMSG print the error message only if
         * @even_ebadmsg is true.
         */
        if (err && (err != -EBADMSG || even_ebadmsg)) {
                ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
                dump_stack();
        }
        return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
                    int len)
{
        int err;

        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
#ifndef __UBOOT__
        else
                err = dbg_leb_write(c, lnum, buf, offs, len);
#endif
        if (err) {
                ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
                          len, lnum, offs, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
        int err;

        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_change(c->ubi, lnum, buf, len);
#ifndef __UBOOT__
        else
                err = dbg_leb_change(c, lnum, buf, len);
#endif
        if (err) {
                ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
                          len, lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
        int err;

        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_unmap(c->ubi, lnum);
#ifndef __UBOOT__
        else
                err = dbg_leb_unmap(c, lnum);
#endif
        if (err) {
                ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
        int err;

        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (c->ro_error)
                return -EROFS;
        if (!dbg_is_tst_rcvry(c))
                err = ubi_leb_map(c->ubi, lnum);
#ifndef __UBOOT__
        else
                err = dbg_leb_map(c, lnum);
#endif
        if (err) {
                ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
                ubifs_ro_mode(c, err);
                dump_stack();
        }
        return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
        int err;

        err = ubi_is_mapped(c->ubi, lnum);
        if (err < 0) {
                ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
                          lnum, err);
                dump_stack();
        }
        return err;
}

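/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number, node type, node length and CRC
 * checksum. The length is validated against the per-type ranges in
 * @c->ranges so that a corrupted length in the common header cannot make
 * UBIFS step outside the allocated buffer.
 *
 * The CRC of data nodes is skipped when @must_chk_crc is zero, the
 * "no_chk_data_crc" option is in effect, and the file-system is neither being
 * mounted nor re-mounted read-write.
 *
 * Returns zero in case of success, %-EUCLEAN in case of bad magic or CRC, and
 * %-EINVAL for other inconsistencies.
 */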
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
                     int offs, int quiet, int must_chk_crc)
{
        int err = -EINVAL, type, node_len;
        uint32_t crc, node_crc, magic;
        const struct ubifs_ch *ch = buf;

        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);

        magic = le32_to_cpu(ch->magic);
        if (magic != UBIFS_NODE_MAGIC) {
                if (!quiet)
                        ubifs_err(c, "bad magic %#08x, expected %#08x",
                                  magic, UBIFS_NODE_MAGIC);
                err = -EUCLEAN;
                goto out;
        }

        type = ch->node_type;
        if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
                if (!quiet)
                        ubifs_err(c, "bad node type %d", type);
                goto out;
        }

        node_len = le32_to_cpu(ch->len);
        if (node_len + offs > c->leb_size)
                goto out_len;

        if (c->ranges[type].max_len == 0) {
                if (node_len != c->ranges[type].len)
                        goto out_len;
        } else if (node_len < c->ranges[type].min_len ||
                   node_len > c->ranges[type].max_len)
                goto out_len;

        if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
            !c->remounting_rw && c->no_chk_data_crc)
                return 0;

        crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
        node_crc = le32_to_cpu(ch->crc);
        if (crc != node_crc) {
                if (!quiet)
                        ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
                                  crc, node_crc);
                err = -EUCLEAN;
                goto out;
        }

        return 0;

out_len:
        if (!quiet)
                ubifs_err(c, "bad node length %d", node_len);
out:
        if (!quiet) {
                ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
                ubifs_dump_node(c, buf);
                dump_stack();
        }
        return err;
}

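/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * Flash can only be written in multiples of the minimal I/O unit, so unused
 * space has to be filled with padding. If there is enough room, a padding
 * node is written at @buf and the rest is zeroed; otherwise the space is
 * filled with %UBIFS_PADDING_BYTE. The padding node makes it possible to
 * skip the padded area when the LEB is scanned later.
 */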
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
        uint32_t crc;

        ubifs_assert(pad >= 0 && !(pad & 7));

        if (pad >= UBIFS_PAD_NODE_SZ) {
                struct ubifs_ch *ch = buf;
                struct ubifs_pad_node *pad_node = buf;

                ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
                ch->node_type = UBIFS_PAD_NODE;
                ch->group_type = UBIFS_NO_NODE_GROUP;
                ch->padding[0] = ch->padding[1] = 0;
                ch->sqnum = 0;
                ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
                pad -= UBIFS_PAD_NODE_SZ;
                pad_node->pad_len = cpu_to_le32(pad);
                crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
                ch->crc = cpu_to_le32(crc);
                memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
        } else if (pad > 0)
                /* Too little space, a padding node will not fit */
                memset(buf, UBIFS_PADDING_BYTE, pad);
}

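/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */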
static unsigned long long next_sqnum(struct ubifs_info *c)
{
        unsigned long long sqnum;

        spin_lock(&c->cnt_lock);
        sqnum = ++c->max_sqnum;
        spin_unlock(&c->cnt_lock);

        if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
                if (sqnum >= SQNUM_WATERMARK) {
                        ubifs_err(c, "sequence number overflow %llu, end of life",
                                  sqnum);
                        ubifs_ro_mode(c, -EINVAL);
                }
                ubifs_warn(c, "running out of sequence numbers, end of life soon");
        }

        return sqnum;
}

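/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to prepare
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares the node at @node to be written to the media - it
 * fills the common header, assigns a sequence number and calculates the node
 * CRC. If @pad is non-zero, proper padding is added up to the next minimal
 * I/O unit. Callers typically fill in the node-specific fields first and
 * then write the prepared buffer, e.g. through the write-buffer.
 */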
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
        uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        ch->group_type = UBIFS_NO_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);

        if (pad) {
                len = ALIGN(len, 8);
                pad = ALIGN(len, c->min_io_size) - len;
                ubifs_pad(c, node + len, pad);
        }
}

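/**
 * ubifs_prep_grp_node - prepare a node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to prepare
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function fills the common header, assigns a sequence number, marks
 * the node as part of a node group (or as the last node of the group) and
 * calculates the node CRC.
 */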
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
        uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ubifs_assert(len >= UBIFS_CH_SZ);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        if (last)
                ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
        else
                ch->group_type = UBIFS_IN_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
        ch->crc = cpu_to_le32(crc);
}

#ifndef __UBOOT__
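/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires. It marks the
 * write-buffer for synchronization and wakes up the background thread.
 */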
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
        struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

        dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
        wbuf->need_sync = 1;
        wbuf->c->need_wbuf_sync = 1;
        ubifs_wake_up_bgt(wbuf->c);
        return HRTIMER_NORESTART;
}

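/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */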
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
        ubifs_assert(!hrtimer_active(&wbuf->timer));

        if (wbuf->no_timer)
                return;
        dbg_io("set timer for jhead %s, %llu-%llu millisecs",
               dbg_jhead(wbuf->jhead),
               div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
               div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
                       USEC_PER_SEC));
        hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
                               HRTIMER_MODE_REL);
}
#endif

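/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */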
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
        if (wbuf->no_timer)
                return;
        wbuf->need_sync = 0;
#ifndef __UBOOT__
        hrtimer_cancel(&wbuf->timer);
#endif
}

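/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. If the
 * write-buffer is only partially filled, only the used part (aligned to a
 * @c->min_io_size boundary) is written out.
 */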
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
        struct ubifs_info *c = wbuf->c;
        int err, dirt, sync_len;

        cancel_wbuf_timer_nolock(wbuf);
        if (!wbuf->used || wbuf->lnum == -1)
                /* Write-buffer is empty or not seeked */
                return 0;

        dbg_io("LEB %d:%d, %d bytes, jhead %s",
               wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
        ubifs_assert(!(wbuf->avail & 7));
        ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
        ubifs_assert(wbuf->size >= c->min_io_size);
        ubifs_assert(wbuf->size <= c->max_write_size);
        ubifs_assert(wbuf->size % c->min_io_size == 0);
        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (c->leb_size - wbuf->offs >= c->max_write_size)
                ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

        if (c->ro_error)
                return -EROFS;

        /*
         * Do not write the whole write-buffer but only the minimum necessary
         * amount of min. I/O units.
         */
        sync_len = ALIGN(wbuf->used, c->min_io_size);
        dirt = sync_len - wbuf->used;
        if (dirt)
                ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
        if (err)
                return err;

        spin_lock(&wbuf->lock);
        wbuf->offs += sync_len;
        /*
         * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
         * Pick the new write-buffer size so that the buffer does not cross
         * the next max. write unit boundary or the end of the LEB.
         */
        if (c->leb_size - wbuf->offs < c->max_write_size)
                wbuf->size = c->leb_size - wbuf->offs;
        else if (wbuf->offs & (c->max_write_size - 1))
                wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
        else
                wbuf->size = c->max_write_size;
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

        if (wbuf->sync_callback)
                err = wbuf->sync_callback(c, wbuf->lnum,
                                          c->leb_size - wbuf->offs, dirt);
        return err;
}

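/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */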
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
        const struct ubifs_info *c = wbuf->c;

        dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
        ubifs_assert(offs >= 0 && offs <= c->leb_size);
        ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
        ubifs_assert(lnum != wbuf->lnum);
        ubifs_assert(wbuf->used == 0);

        spin_lock(&wbuf->lock);
        wbuf->lnum = lnum;
        wbuf->offs = offs;
        if (c->leb_size - wbuf->offs < c->max_write_size)
                wbuf->size = c->leb_size - wbuf->offs;
        else if (wbuf->offs & (c->max_write_size - 1))
                wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
        else
                wbuf->size = c->max_write_size;
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        spin_unlock(&wbuf->lock);

        return 0;
}

#ifndef __UBOOT__
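/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize the
 * write-buffers whose timers have expired. Returns zero in case of success
 * and a negative error code in case of failure.
 */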
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
        int err, i;

        ubifs_assert(!c->ro_media && !c->ro_mount);
        if (!c->need_wbuf_sync)
                return 0;
        c->need_wbuf_sync = 0;

        if (c->ro_error) {
                err = -EROFS;
                goto out_timers;
        }

        dbg_io("synchronize");
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                cond_resched();

                /*
                 * If the mutex is locked then the wbuf is being changed, so
                 * synchronization is not necessary.
                 */
                if (mutex_is_locked(&wbuf->io_mutex))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (!wbuf->need_sync) {
                        mutex_unlock(&wbuf->io_mutex);
                        continue;
                }

                err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
                if (err) {
                        ubifs_err(c, "cannot sync write-buffer, error %d", err);
                        ubifs_ro_mode(c, err);
                        goto out_timers;
                }
        }

        return 0;

out_timers:
        /* Cancel all timers to prevent repeated errors */
        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                cancel_wbuf_timer_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
        }
        return err;
}

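/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node will not reach the flash media immediately if it
 * does not take a whole max. write unit (@c->max_write_size). Instead, it
 * stays in RAM until the write-buffer is synchronized (e.g., by timer,
 * because the write-buffer is full, or when more data is written).
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */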
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
        struct ubifs_info *c = wbuf->c;
        int err, written, n, aligned_len = ALIGN(len, 8);

        dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
               dbg_ntype(((struct ubifs_ch *)buf)->node_type),
               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
        ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
        ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
        ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
        ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
        ubifs_assert(wbuf->size >= c->min_io_size);
        ubifs_assert(wbuf->size <= c->max_write_size);
        ubifs_assert(wbuf->size % c->min_io_size == 0);
        ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
        ubifs_assert(!c->ro_media && !c->ro_mount);
        ubifs_assert(!c->space_fixup);
        if (c->leb_size - wbuf->offs >= c->max_write_size)
                ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

        if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
                err = -ENOSPC;
                goto out;
        }

        cancel_wbuf_timer_nolock(wbuf);

        if (c->ro_error)
                return -EROFS;

        if (aligned_len <= wbuf->avail) {
                /*
                 * The node is not very large and fits entirely within the
                 * write-buffer.
                 */
                memcpy(wbuf->buf + wbuf->used, buf, len);

                if (aligned_len == wbuf->avail) {
                        dbg_io("flush jhead %s wbuf to LEB %d:%d",
                               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
                                              wbuf->offs, wbuf->size);
                        if (err)
                                goto out;

                        spin_lock(&wbuf->lock);
                        wbuf->offs += wbuf->size;
                        if (c->leb_size - wbuf->offs >= c->max_write_size)
                                wbuf->size = c->max_write_size;
                        else
                                wbuf->size = c->leb_size - wbuf->offs;
                        wbuf->avail = wbuf->size;
                        wbuf->used = 0;
                        wbuf->next_ino = 0;
                        spin_unlock(&wbuf->lock);
                } else {
                        spin_lock(&wbuf->lock);
                        wbuf->avail -= aligned_len;
                        wbuf->used += aligned_len;
                        spin_unlock(&wbuf->lock);
                }

                goto exit;
        }

        written = 0;

        if (wbuf->used) {
                /*
                 * The node is large and does not fit entirely within the
                 * current available space. Fill and flush the write-buffer
                 * and switch to the next max. write unit.
                 */
                dbg_io("flush jhead %s wbuf to LEB %d:%d",
                       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
                err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
                                      wbuf->size);
                if (err)
                        goto out;

                wbuf->offs += wbuf->size;
                len -= wbuf->avail;
                aligned_len -= wbuf->avail;
                written += wbuf->avail;
        } else if (wbuf->offs & (c->max_write_size - 1)) {
                /*
                 * The write-buffer offset is not aligned to
                 * @c->max_write_size and @wbuf->size is less than
                 * @c->max_write_size. Write @wbuf->size bytes to make sure
                 * the following writes are done in optimal
                 * @c->max_write_size chunks.
                 */
                dbg_io("write %d bytes to LEB %d:%d",
                       wbuf->size, wbuf->lnum, wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
                                      wbuf->size);
                if (err)
                        goto out;

                wbuf->offs += wbuf->size;
                len -= wbuf->size;
                aligned_len -= wbuf->size;
                written += wbuf->size;
        }

        /*
         * The remaining data may take several whole max. write units, so
         * write that part directly to the flash media in max. write unit
         * sized chunks.
         */
        n = aligned_len >> c->max_write_shift;
        if (n) {
                n <<= c->max_write_shift;
                dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
                       wbuf->offs);
                err = ubifs_leb_write(c, wbuf->lnum, buf + written,
                                      wbuf->offs, n);
                if (err)
                        goto out;
                wbuf->offs += n;
                aligned_len -= n;
                len -= n;
                written += n;
        }

        spin_lock(&wbuf->lock);
        if (aligned_len)
                /*
                 * What is left does not take a whole max. write unit, so
                 * keep it in the write-buffer.
                 */
                memcpy(wbuf->buf, buf + written, len);

        if (c->leb_size - wbuf->offs >= c->max_write_size)
                wbuf->size = c->max_write_size;
        else
                wbuf->size = c->leb_size - wbuf->offs;
        wbuf->avail = wbuf->size - aligned_len;
        wbuf->used = aligned_len;
        wbuf->next_ino = 0;
        spin_unlock(&wbuf->lock);

exit:
        if (wbuf->sync_callback) {
                int free = c->leb_size - wbuf->offs - wbuf->used;

                err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
                if (err)
                        goto out;
        }

        if (wbuf->used)
                new_wbuf_timer_nolock(wbuf);

        return 0;

out:
        ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
                  len, wbuf->lnum, wbuf->offs, err);
        ubifs_dump_node(c, buf);
        dump_stack();
        ubifs_dump_leb(c, wbuf->lnum);
        return err;
}

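/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function prepares the node (fills the common header, assigns a
 * sequence number and calculates the CRC) and writes it to LEB @lnum:@offs.
 * The buffer at @buf has to be large enough for @len aligned up to the
 * minimal I/O unit, because padding is appended. Returns zero in case of
 * success and a negative error code in case of failure.
 */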
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
                     int offs)
{
        int err, buf_len = ALIGN(len, c->min_io_size);

        dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
               lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
               buf_len);
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
        ubifs_assert(!c->ro_media && !c->ro_mount);
        ubifs_assert(!c->space_fixup);

        if (c->ro_error)
                return -EROFS;

        ubifs_prepare_node(c, buf, len, 1);
        err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
        if (err)
                ubifs_dump_node(c, buf);

        return err;
}
#endif

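/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the buffer, otherwise it reads the flash
 * media. Returns zero in case of success, %-EUCLEAN if CRC mismatched and a
 * negative error code in case of failure.
 */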
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
                         int lnum, int offs)
{
        const struct ubifs_info *c = wbuf->c;
        int err, rlen, overlap;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
               dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
        ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);
        ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        spin_lock(&wbuf->lock);
        overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
        if (!overlap) {
                /* We may safely unlock the write-buffer and read the data */
                spin_unlock(&wbuf->lock);
                return ubifs_read_node(c, buf, type, len, lnum, offs);
        }

        /* Don't read under the write-buffer */
        rlen = wbuf->offs - offs;
        if (rlen < 0)
                rlen = 0;

        /* Copy the rest from the write-buffer */
        memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
        spin_unlock(&wbuf->lock);

        if (rlen > 0) {
                /* Read everything that goes before the write-buffer */
                err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
                if (err && err != -EBADMSG)
                        return err;
        }

        if (type != ch->node_type) {
                ubifs_err(c, "bad node type (%d but expected %d)",
                          ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
        if (err) {
                ubifs_err(c, "expected node type %d", type);
                return err;
        }

        rlen = le32_to_cpu(ch->len);
        if (rlen != len) {
                ubifs_err(c, "bad node length %d, expected %d", rlen, len);
                goto out;
        }

        return 0;

out:
        ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
        ubifs_dump_node(c, buf);
        dump_stack();
        return -EINVAL;
}

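/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */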
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
                    int lnum, int offs)
{
        int err, l;
        struct ubifs_ch *ch = buf;

        dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);
        ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

        err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
        if (err && err != -EBADMSG)
                return err;

        if (type != ch->node_type) {
                ubifs_errc(c, "bad node type (%d but expected %d)",
                           ch->node_type, type);
                goto out;
        }

        err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
        if (err) {
                ubifs_errc(c, "expected node type %d", type);
                return err;
        }

        l = le32_to_cpu(ch->len);
        if (l != len) {
                ubifs_errc(c, "bad node length %d, expected %d", l, len);
                goto out;
        }

        return 0;

out:
        ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
                   offs, ubi_is_mapped(c->ubi, lnum));
        if (!c->probing) {
                ubifs_dump_node(c, buf);
                dump_stack();
        }
        return -EINVAL;
}

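/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes write-buffer @wbuf. Returns zero in case of
 * success and %-ENOMEM in case of failure.
 */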
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
        size_t size;

        wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
        if (!wbuf->buf)
                return -ENOMEM;

        size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
        wbuf->inodes = kmalloc(size, GFP_KERNEL);
        if (!wbuf->inodes) {
                kfree(wbuf->buf);
                wbuf->buf = NULL;
                return -ENOMEM;
        }

        wbuf->used = 0;
        wbuf->lnum = wbuf->offs = -1;
        /*
         * The initial write-buffer size is chosen so that the buffer does not
         * cross a max. write unit boundary: if @c->leb_start is aligned to
         * @c->max_write_size the size is a full max. write unit, otherwise it
         * only reaches up to the next max. write unit boundary.
         */
        size = c->max_write_size - (c->leb_start % c->max_write_size);
        wbuf->avail = wbuf->size = size;
        wbuf->sync_callback = NULL;
        mutex_init(&wbuf->io_mutex);
        spin_lock_init(&wbuf->lock);
        wbuf->c = c;
        wbuf->next_ino = 0;

#ifndef __UBOOT__
        hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        wbuf->timer.function = wbuf_timer_callback_nolock;
        wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
        wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
        wbuf->delta *= 1000000000ULL;
        ubifs_assert(wbuf->delta <= ULONG_MAX);
#endif
        return 0;
}

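/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function records the inode number so that write-buffers holding nodes
 * of this inode can later be found and synchronized (see
 * 'ubifs_sync_wbufs_by_inode()').
 */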
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
        if (!wbuf->buf)
                /* Write-buffer has not been initialized, nothing to do */
                return;

        spin_lock(&wbuf->lock);
        if (wbuf->used)
                wbuf->inodes[wbuf->next_ino++] = inum;
        spin_unlock(&wbuf->lock);
}

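/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */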
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
        int i, ret = 0;

        spin_lock(&wbuf->lock);
        for (i = 0; i < wbuf->next_ino; i++)
                if (inum == wbuf->inodes[i]) {
                        ret = 1;
                        break;
                }
        spin_unlock(&wbuf->lock);

        return ret;
}

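/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */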
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
        int i, err = 0;

        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                if (i == GCHD)
                        /*
                         * The GC head is special, do not look at it. Even if
                         * it contains something related to this inode, it is
                         * a copy of the corresponding on-flash node which
                         * sits somewhere else.
                         */
                        continue;

                if (!wbuf_has_ino(wbuf, inode->i_ino))
                        continue;

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (wbuf_has_ino(wbuf, inode->i_ino))
                        err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);

                if (err) {
                        ubifs_ro_mode(c, err);
                        return err;
                }
        }
        return 0;
}