1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73#include <linux/crc32.h>
74#include <linux/slab.h>
75#include "ubifs.h"
76
77
78
79
80
81
82void ubifs_ro_mode(struct ubifs_info *c, int err)
83{
84 if (!c->ro_error) {
85 c->ro_error = 1;
86 c->no_chk_data_crc = 0;
87 c->vfs_sb->s_flags |= SB_RDONLY;
88 ubifs_warn(c, "switched to read-only mode, error %d", err);
89 dump_stack();
90 }
91}
92
93
94
95
96
97
98
99int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
100 int len, int even_ebadmsg)
101{
102 int err;
103
104 err = ubi_read(c->ubi, lnum, buf, offs, len);
105
106
107
108
109 if (err && (err != -EBADMSG || even_ebadmsg)) {
110 ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
111 len, lnum, offs, err);
112 dump_stack();
113 }
114 return err;
115}
116
117int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
118 int len)
119{
120 int err;
121
122 ubifs_assert(!c->ro_media && !c->ro_mount);
123 if (c->ro_error)
124 return -EROFS;
125 if (!dbg_is_tst_rcvry(c))
126 err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
127 else
128 err = dbg_leb_write(c, lnum, buf, offs, len);
129 if (err) {
130 ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
131 len, lnum, offs, err);
132 ubifs_ro_mode(c, err);
133 dump_stack();
134 }
135 return err;
136}
137
138int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
139{
140 int err;
141
142 ubifs_assert(!c->ro_media && !c->ro_mount);
143 if (c->ro_error)
144 return -EROFS;
145 if (!dbg_is_tst_rcvry(c))
146 err = ubi_leb_change(c->ubi, lnum, buf, len);
147 else
148 err = dbg_leb_change(c, lnum, buf, len);
149 if (err) {
150 ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
151 len, lnum, err);
152 ubifs_ro_mode(c, err);
153 dump_stack();
154 }
155 return err;
156}
157
158int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
159{
160 int err;
161
162 ubifs_assert(!c->ro_media && !c->ro_mount);
163 if (c->ro_error)
164 return -EROFS;
165 if (!dbg_is_tst_rcvry(c))
166 err = ubi_leb_unmap(c->ubi, lnum);
167 else
168 err = dbg_leb_unmap(c, lnum);
169 if (err) {
170 ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
171 ubifs_ro_mode(c, err);
172 dump_stack();
173 }
174 return err;
175}
176
177int ubifs_leb_map(struct ubifs_info *c, int lnum)
178{
179 int err;
180
181 ubifs_assert(!c->ro_media && !c->ro_mount);
182 if (c->ro_error)
183 return -EROFS;
184 if (!dbg_is_tst_rcvry(c))
185 err = ubi_leb_map(c->ubi, lnum);
186 else
187 err = dbg_leb_map(c, lnum);
188 if (err) {
189 ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
190 ubifs_ro_mode(c, err);
191 dump_stack();
192 }
193 return err;
194}
195
196int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
197{
198 int err;
199
200 err = ubi_is_mapped(c->ubi, lnum);
201 if (err < 0) {
202 ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
203 lnum, err);
204 dump_stack();
205 }
206 return err;
207}
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number, type, length and CRC checksum.
 * The length is validated against both the LEB size and the per-type length
 * ranges in @c->ranges, which prevents out-of-bounds reads when the common
 * header of an untrusted image carries a bogus length.
 *
 * CRC checking of data nodes may be skipped when @c->no_chk_data_crc is set
 * (mount option), but only if @must_chk_crc is zero and we are neither
 * mounting (@c->mounting) nor re-mounting to R/W mode (@c->remounting_rw).
 *
 * Returns zero in case of success, %-EUCLEAN in case of bad magic or CRC,
 * and %-EINVAL for other validation failures.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	/* Nodes are 8-byte aligned on the media */
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err(c, "bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err(c, "bad node type %d", type);
		goto out;
	}

	/* The node must fit inside the LEB it was read from */
	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	/* max_len == 0 means the node type has a fixed length */
	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	/* Optionally skip the (relatively expensive) data-node CRC check */
	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	/* The CRC covers everything after the magic and CRC fields (8 bytes) */
	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err(c, "bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return err;
}
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * If @pad is large enough to hold a padding node, one is written at @buf
 * (with its pad_len field recording the space it covers beyond the node
 * itself, which is zero-filled). Otherwise the area is simply filled with
 * %UBIFS_PADDING_BYTE bytes. @pad must be non-negative and 8-byte aligned.
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		/* pad_len is the space covered beyond the padding node itself */
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		/* CRC covers everything after the magic and CRC fields */
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node does not fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}
343
344
345
346
347
348static unsigned long long next_sqnum(struct ubifs_info *c)
349{
350 unsigned long long sqnum;
351
352 spin_lock(&c->cnt_lock);
353 sqnum = ++c->max_sqnum;
354 spin_unlock(&c->cnt_lock);
355
356 if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
357 if (sqnum >= SQNUM_WATERMARK) {
358 ubifs_err(c, "sequence number overflow %llu, end of life",
359 sqnum);
360 ubifs_ro_mode(c, -EINVAL);
361 }
362 ubifs_warn(c, "running out of sequence numbers, end of life soon");
363 }
364
365 return sqnum;
366}
367
368
369
370
371
372
373
374
375
376
377
378
379void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
380{
381 uint32_t crc;
382 struct ubifs_ch *ch = node;
383 unsigned long long sqnum = next_sqnum(c);
384
385 ubifs_assert(len >= UBIFS_CH_SZ);
386
387 ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
388 ch->len = cpu_to_le32(len);
389 ch->group_type = UBIFS_NO_NODE_GROUP;
390 ch->sqnum = cpu_to_le64(sqnum);
391 ch->padding[0] = ch->padding[1] = 0;
392 crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
393 ch->crc = cpu_to_le32(crc);
394
395 if (pad) {
396 len = ALIGN(len, 8);
397 pad = ALIGN(len, c->min_io_size) - len;
398 ubifs_pad(c, node + len, pad);
399 }
400}
401
402
403
404
405
406
407
408
409
410
411
412void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
413{
414 uint32_t crc;
415 struct ubifs_ch *ch = node;
416 unsigned long long sqnum = next_sqnum(c);
417
418 ubifs_assert(len >= UBIFS_CH_SZ);
419
420 ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
421 ch->len = cpu_to_le32(len);
422 if (last)
423 ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
424 else
425 ch->group_type = UBIFS_IN_NODE_GROUP;
426 ch->sqnum = cpu_to_le64(sqnum);
427 ch->padding[0] = ch->padding[1] = 0;
428 crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
429 ch->crc = cpu_to_le32(crc);
430}
431
432
433
434
435
436
437
438static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
439{
440 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
441
442 dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
443 wbuf->need_sync = 1;
444 wbuf->c->need_wbuf_sync = 1;
445 ubifs_wake_up_bgt(wbuf->c);
446 return HRTIMER_NORESTART;
447}
448
449
450
451
452
/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 *
 * Arms the write-buffer hrtimer with a soft limit of
 * 10 * @dirty_writeback_interval centiseconds and a slack range of 10% of
 * that, unless timers are disabled for this write-buffer.
 */
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
	unsigned long long delta = dirty_writeback_interval;

	/* centi to milli, milli to nano, then 10% */
	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;

	ubifs_assert(!hrtimer_active(&wbuf->timer));
	ubifs_assert(delta <= ULONG_MAX);

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
			       HRTIMER_MODE_REL);
}
473
474
475
476
477
/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 *
 * Also clears the pending-sync flag so the background thread does not try to
 * synchronize a write-buffer which is being cancelled.
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}
485
486
487
488
489
490
491
492
493
494
495
496
497
498
/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * only the used part of the write-buffer, aligned up to a @c->min_io_size
 * boundary (the alignment gap is padded), is written out.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer but only as many min. I/O units
	 * as are actually needed; pad the last one.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Re-derive the write-buffer size for the new offset:
	 * - less than @c->max_write_size left in the LEB: use the remainder;
	 * - offset not aligned to @c->max_write_size: use only enough to
	 *   reach the next max. write unit boundary;
	 * - otherwise a full max. write unit.
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	/* Report the padded space as dirty to the callback, if any */
	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
563
564
565
566
567
568
569
570
571
572
573
574int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
575{
576 const struct ubifs_info *c = wbuf->c;
577
578 dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
579 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
580 ubifs_assert(offs >= 0 && offs <= c->leb_size);
581 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
582 ubifs_assert(lnum != wbuf->lnum);
583 ubifs_assert(wbuf->used == 0);
584
585 spin_lock(&wbuf->lock);
586 wbuf->lnum = lnum;
587 wbuf->offs = offs;
588 if (c->leb_size - wbuf->offs < c->max_write_size)
589 wbuf->size = c->leb_size - wbuf->offs;
590 else if (wbuf->offs & (c->max_write_size - 1))
591 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
592 else
593 wbuf->size = c->max_write_size;
594 wbuf->avail = wbuf->size;
595 wbuf->used = 0;
596 spin_unlock(&wbuf->lock);
597
598 return 0;
599}
600
601
602
603
604
605
606
607
608
/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize the
 * write-buffers of all journal heads that flagged themselves via their
 * timers. Returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then somebody else is using the
		 * write-buffer right now; do not block — skip it, the owner
		 * will deal with synchronization.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err(c, "cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
682{
683 struct ubifs_info *c = wbuf->c;
684 int err, written, n, aligned_len = ALIGN(len, 8);
685
686 dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
687 dbg_ntype(((struct ubifs_ch *)buf)->node_type),
688 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
689 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
690 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
691 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
692 ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
693 ubifs_assert(wbuf->size >= c->min_io_size);
694 ubifs_assert(wbuf->size <= c->max_write_size);
695 ubifs_assert(wbuf->size % c->min_io_size == 0);
696 ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
697 ubifs_assert(!c->ro_media && !c->ro_mount);
698 ubifs_assert(!c->space_fixup);
699 if (c->leb_size - wbuf->offs >= c->max_write_size)
700 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
701
702 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
703 err = -ENOSPC;
704 goto out;
705 }
706
707 cancel_wbuf_timer_nolock(wbuf);
708
709 if (c->ro_error)
710 return -EROFS;
711
712 if (aligned_len <= wbuf->avail) {
713
714
715
716
717 memcpy(wbuf->buf + wbuf->used, buf, len);
718
719 if (aligned_len == wbuf->avail) {
720 dbg_io("flush jhead %s wbuf to LEB %d:%d",
721 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
722 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
723 wbuf->offs, wbuf->size);
724 if (err)
725 goto out;
726
727 spin_lock(&wbuf->lock);
728 wbuf->offs += wbuf->size;
729 if (c->leb_size - wbuf->offs >= c->max_write_size)
730 wbuf->size = c->max_write_size;
731 else
732 wbuf->size = c->leb_size - wbuf->offs;
733 wbuf->avail = wbuf->size;
734 wbuf->used = 0;
735 wbuf->next_ino = 0;
736 spin_unlock(&wbuf->lock);
737 } else {
738 spin_lock(&wbuf->lock);
739 wbuf->avail -= aligned_len;
740 wbuf->used += aligned_len;
741 spin_unlock(&wbuf->lock);
742 }
743
744 goto exit;
745 }
746
747 written = 0;
748
749 if (wbuf->used) {
750
751
752
753
754
755 dbg_io("flush jhead %s wbuf to LEB %d:%d",
756 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
757 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
758 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
759 wbuf->size);
760 if (err)
761 goto out;
762
763 wbuf->offs += wbuf->size;
764 len -= wbuf->avail;
765 aligned_len -= wbuf->avail;
766 written += wbuf->avail;
767 } else if (wbuf->offs & (c->max_write_size - 1)) {
768
769
770
771
772
773
774
775 dbg_io("write %d bytes to LEB %d:%d",
776 wbuf->size, wbuf->lnum, wbuf->offs);
777 err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
778 wbuf->size);
779 if (err)
780 goto out;
781
782 wbuf->offs += wbuf->size;
783 len -= wbuf->size;
784 aligned_len -= wbuf->size;
785 written += wbuf->size;
786 }
787
788
789
790
791
792
793
794 n = aligned_len >> c->max_write_shift;
795 if (n) {
796 n <<= c->max_write_shift;
797 dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
798 wbuf->offs);
799 err = ubifs_leb_write(c, wbuf->lnum, buf + written,
800 wbuf->offs, n);
801 if (err)
802 goto out;
803 wbuf->offs += n;
804 aligned_len -= n;
805 len -= n;
806 written += n;
807 }
808
809 spin_lock(&wbuf->lock);
810 if (aligned_len)
811
812
813
814
815
816 memcpy(wbuf->buf, buf + written, len);
817
818 if (c->leb_size - wbuf->offs >= c->max_write_size)
819 wbuf->size = c->max_write_size;
820 else
821 wbuf->size = c->leb_size - wbuf->offs;
822 wbuf->avail = wbuf->size - aligned_len;
823 wbuf->used = aligned_len;
824 wbuf->next_ino = 0;
825 spin_unlock(&wbuf->lock);
826
827exit:
828 if (wbuf->sync_callback) {
829 int free = c->leb_size - wbuf->offs - wbuf->used;
830
831 err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
832 if (err)
833 goto out;
834 }
835
836 if (wbuf->used)
837 new_wbuf_timer_nolock(wbuf);
838
839 return 0;
840
841out:
842 ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
843 len, wbuf->lnum, wbuf->offs, err);
844 ubifs_dump_node(c, buf);
845 dump_stack();
846 ubifs_dump_leb(c, wbuf->lnum);
847 return err;
848}
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
865 int offs)
866{
867 int err, buf_len = ALIGN(len, c->min_io_size);
868
869 dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
870 lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
871 buf_len);
872 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
873 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
874 ubifs_assert(!c->ro_media && !c->ro_mount);
875 ubifs_assert(!c->space_fixup);
876
877 if (c->ro_error)
878 return -EROFS;
879
880 ubifs_prepare_node(c, buf, len, 1);
881 err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
882 if (err)
883 ubifs_dump_node(c, buf);
884
885 return err;
886}
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the write-buffer, otherwise it reads the flash
 * media. Returns zero in case of success, %-EUCLEAN if CRC mismatched and a
 * negative error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	/* The node overlaps the write-buffer iff it ends past wbuf->offs */
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* @rlen is how many bytes sit on the media, before the write-buffer */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the write-buffer part of the node while still under the lock */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before the write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err(c, "bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err(c, "expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err(c, "bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}
966
967
968
969
970
971
972
973
974
975
976
977
978
979
/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	/* -EBADMSG may still yield usable data; validation below decides */
	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_errc(c, "bad node type (%d but expected %d)",
			   ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_errc(c, "expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_errc(c, "bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
		   offs, ubi_is_mapped(c->ubi, lnum));
	/* Stay quiet while probing whether this is a UBIFS image at all */
	if (!c->probing) {
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return -EINVAL;
}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
1035{
1036 size_t size;
1037
1038 wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
1039 if (!wbuf->buf)
1040 return -ENOMEM;
1041
1042 size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
1043 wbuf->inodes = kmalloc(size, GFP_KERNEL);
1044 if (!wbuf->inodes) {
1045 kfree(wbuf->buf);
1046 wbuf->buf = NULL;
1047 return -ENOMEM;
1048 }
1049
1050 wbuf->used = 0;
1051 wbuf->lnum = wbuf->offs = -1;
1052
1053
1054
1055
1056
1057
1058 size = c->max_write_size - (c->leb_start % c->max_write_size);
1059 wbuf->avail = wbuf->size = size;
1060 wbuf->sync_callback = NULL;
1061 mutex_init(&wbuf->io_mutex);
1062 spin_lock_init(&wbuf->lock);
1063 wbuf->c = c;
1064 wbuf->next_ino = 0;
1065
1066 hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1067 wbuf->timer.function = wbuf_timer_callback_nolock;
1068 return 0;
1069}
1070
1071
1072
1073
1074
1075
1076
1077
/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * Records @inum so that later syncs triggered for this inode know the
 * write-buffer holds its data. Only records when the buffer actually holds
 * unsynchronized data.
 */
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* Write-buffer was never allocated for this head — nothing to track */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
1099{
1100 int i, ret = 0;
1101
1102 spin_lock(&wbuf->lock);
1103 for (i = 0; i < wbuf->next_ino; i++)
1104 if (inum == wbuf->inodes[i]) {
1105 ret = 1;
1106 break;
1107 }
1108 spin_unlock(&wbuf->lock);
1109
1110 return ret;
1111}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure (the file-system is also switched to R/O mode on failure).
 */
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special — it is skipped here; whatever
			 * it holds for this inode is handled elsewhere.
			 * NOTE(review): the original rationale comment was
			 * stripped; behavior (skip GCHD) is preserved as-is.
			 */
			continue;

		/* Cheap lockless pre-check to avoid taking the mutex */
		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		/* Re-check under the mutex — the wbuf may have been synced */
		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}
1153