/*
 * This file is part of UBIFS.
 *
 * This file implements the UBIFS I/O subsystem, which provides various
 * I/O-related helper functions (reading, writing and checking/validating
 * nodes) and implements write-buffering support. Write-buffers help to save
 * space which otherwise would have been wasted for padding to the nearest
 * minimal I/O unit boundary. Instead, data first goes to the write-buffer
 * and is flushed to the flash media later, when the buffer is full or when
 * it has not been used for some time (by timer).
 *
 * UBIFS distinguishes between the minimum write size (c->min_io_size) and
 * the maximum write size (c->max_write_size). Write-buffers are of the
 * maximum write size, but they are synchronized in minimum write size
 * multiples, which keeps the amount of padding low.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= MS_RDONLY;
		ubifs_warn("switched to read-only mode, error %d", err);
		dump_stack();
	}
}

/**
 * ubifs_leb_read - read data from a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to read from
 * @buf: buffer to read data to
 * @offs: offset within the LEB to read from
 * @len: amount of bytes to read
 * @even_ebadmsg: print an error message even in case of %-EBADMSG
 *
 * This is a wrapper over 'ubi_read()' which prints an error message on
 * failure. Returns zero on success and a negative error code otherwise.
 */
int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if
	 * @even_ebadmsg is true.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dbg_dump_stack();
	}
	return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len, int dtype)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
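	/*
	 * Note: dbg_is_tst_rcvry() is the debugging hook for recovery
	 * testing; when it is enabled, writes are routed through the
	 * debugging wrapper, which can emulate power cuts.
	 */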
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len, dtype);
	if (err) {
		ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dbg_dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
		     int dtype)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
	else
		err = dbg_leb_change(c, lnum, buf, len, dtype);
	if (err) {
		ubifs_err("changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dbg_dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
	else
		err = dbg_leb_unmap(c, lnum);
	if (err) {
		ubifs_err("unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dbg_dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype)
{
	int err;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum, dtype);
	else
		err = dbg_leb_map(c, lnum, dtype);
	if (err) {
		ubifs_err("mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dbg_dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dbg_dump_stack();
	}
	return err;
}

/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks the node magic number and CRC checksum. It also
 * validates the node length to prevent UBIFS from misbehaving when fed a
 * file-system image with incorrect nodes. For example, too large a node
 * length in the common header could cause UBIFS to read memory outside of
 * the allocated buffer when checking the CRC checksum.
 *
 * This function may skip the CRC check of data nodes if @c->no_chk_data_crc
 * is true, which is controlled by the corresponding UBIFS mount option.
 * However, if @must_chk_crc is true, or if we are mounting or re-mounting to
 * R/W mode (@c->mounting or @c->remounting_rw), the CRC is always checked.
 *
 * Returns zero in case of success, %-EUCLEAN in case of bad magic or CRC,
 * and %-EINVAL in case of other problems (bad node type or length).
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err("bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err("bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

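	/*
	 * Skip the CRC check of data nodes if the file-system was mounted
	 * with the no_chk_data_crc option, unless the caller insists
	 * (@must_chk_crc) or we are in the middle of mounting/re-mounting to
	 * R/W mode, when the extra safety is worth the cost.
	 */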
	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err("bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err("bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err("bad node at LEB %d:%d", lnum, offs);
		dbg_dump_node(c, buf);
		dbg_dump_stack();
	}
	return err;
}

/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of the minimal I/O unit,
 * so UBIFS pads the unused part of a min. I/O unit. If there is enough room
 * for a padding node (%UBIFS_PAD_NODE_SZ bytes), a padding node recording how
 * many bytes were padded is written; otherwise the space is simply filled
 * with padding bytes (%UBIFS_PADDING_BYTE).
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}

/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

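	/*
	 * Sequence numbers should never get close to overflowing. Warn once
	 * the warning watermark is crossed, and force read-only mode when the
	 * final watermark is reached.
	 */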
	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err("sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn("running out of sequence numbers, end of life soon");
	}

	return sqnum;
}

/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares the node at @node to be written to the media - it
 * calculates the node CRC, fills the common header, and adds proper padding
 * up to the next minimum I/O unit if @pad is not zero.
 */
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);

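	/*
	 * Pad from the 8-byte aligned end of the node up to the next minimum
	 * I/O unit boundary, so the buffer can be written out as-is.
	 */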
	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}

/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares the node at @node to be written to the media - it
 * calculates the node CRC and fills the common header, marking the node as
 * a member (or the last member) of a node group.
 */
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}

/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	ubifs_assert(!hrtimer_active(&wbuf->timer));

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
		       USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
			       HRTIMER_MODE_REL);
}

/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all available data to the flash. Instead, if the
 * write-buffer is only partially filled, only the used part of the
 * write-buffer (aligned to the @c->min_io_size boundary) is synchronized.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer but only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len,
			      wbuf->dtype);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Pick the new write-buffer size: if the LEB does not have room for
	 * a full max. write unit, shrink the write-buffer to whatever is
	 * left; if the new offset is not aligned to the max. write unit,
	 * size the write-buffer so that it ends at the next max. write unit
	 * boundary; otherwise use the full @c->max_write_size.
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}

/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 * @dtype: data type
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
			   int dtype)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(offs >= 0 && offs <= c->leb_size);
	ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(lnum != wbuf->lnum);
	ubifs_assert(wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
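	/*
	 * Size the write-buffer the same way as after a synchronization: no
	 * larger than the space left in the LEB, and ending at a max. write
	 * unit boundary whenever possible.
	 */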
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);
	wbuf->dtype = dtype;

	return 0;
}

/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by the background thread to synchronize
 * write-buffers. Returns zero in case of success and a negative error code
 * in case of failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err("cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}

/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take a whole max. write unit (@c->max_write_size). Instead, the
 * node will sit in RAM until the write-buffer is synchronized (e.g., by
 * timer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

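	/* The node must fit into what is left of the current LEB */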
	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within the
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size,
					      wbuf->dtype);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node does not fit into the write-buffer, but the
		 * write-buffer already contains data: fill it up, flush it,
		 * and continue with the rest of the node.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size, wbuf->dtype);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size. Write out the first @wbuf->size bytes
		 * of the node directly, so that the offset becomes aligned
		 * and the rest can be written in max. write size multiples.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size, wbuf->dtype);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may be larger than the write-buffer, so write
	 * as many whole max. write units as possible directly to the flash
	 * media, bypassing the write-buffer.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n, wbuf->dtype);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * What is left does not take a whole max. write unit, so put
		 * it into the write-buffer and we are done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	dbg_dump_leb(c, wbuf->lnum);
	return err;
}

/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @dtype: data type
 *
 * This function fills the node magic number, assigns a sequence number,
 * calculates the node CRC checksum, pads the buffer up to the minimal I/O
 * unit boundary and writes it to the media. Returns zero in case of success
 * and a negative error code in case of failure.
 */
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs, int dtype)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	ubifs_prepare_node(c, buf, len, 1);
	err = ubifs_leb_write(c, lnum, buf, offs, buf_len, dtype);
	if (err)
		dbg_dump_node(c, buf);

	return err;
}

/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes the data from the buffer, otherwise it reads the flash
 * media. Returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

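	/*
	 * Check whether the requested region overlaps the part of the LEB
	 * which is still sitting in the write-buffer.
	 */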
	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* How much has to be read from the flash media */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before the write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err("bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d", lnum, offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return -EINVAL;
}

/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_err("bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
		  ubi_is_mapped(c->ubi, lnum));
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return -EINVAL;
}

/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes the write-buffer. Returns zero in case of
 * success and %-ENOMEM in case of failure.
 */
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

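	/*
	 * The @inodes array tracks which inodes have nodes sitting in the
	 * write-buffer. In the worst case the buffer holds one node per
	 * %UBIFS_CH_SZ bytes, hence the size below.
	 */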
	size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	/*
	 * If the LEB starts at a max. write size aligned address, the
	 * write-buffer size has to be set to @c->max_write_size. Otherwise,
	 * set it to something smaller so that it ends at the closest max.
	 * write size boundary.
	 */
	size = c->max_write_size - (c->leb_start % c->max_write_size);
	wbuf->avail = wbuf->size = size;
	wbuf->dtype = UBI_UNKNOWN;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

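	/*
	 * The write-buffer timer fires somewhere between the soft limit and
	 * the soft limit plus @delta (the hard limit), which gives the kernel
	 * freedom to coalesce wake-ups.
	 */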
	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
	wbuf->delta *= 1000000000ULL;
	ubifs_assert(wbuf->delta <= ULONG_MAX);
	return 0;
}

/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* The write-buffer is not allocated, nothing to do */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}

/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}

/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * head contains something related to this inode, it
			 * is a _copy_ of the corresponding on-flash node
			 * which sits somewhere else.
			 */
			continue;

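		/*
		 * Check without the mutex first to avoid taking it for heads
		 * which obviously hold nothing from this inode, then re-check
		 * under the mutex because the write-buffer may have changed.
		 */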
		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}