// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 *          Zoltan Sogor
 */

/*
 * This file implements the UBIFS I/O subsystem, which provides various
 * I/O-related helper functions (reading/writing/checking/validating nodes)
 * and implements write-buffering support. Write-buffers help to save space
 * which otherwise would have been wasted for padding to the nearest minimal
 * I/O unit boundary. Instead, data first goes to the write-buffer and is
 * flushed later, when the buffer is full or when it has not been used for
 * some time (by timer).
 *
 * UBIFS distinguishes between minimum write size (@c->min_io_size) and
 * maximum write size (@c->max_write_size). The latter is the maximum amount
 * of bytes the underlying flash is able to program at a time, and writing in
 * @c->max_write_size units should presumably be faster. Obviously,
 * @c->min_io_size <= @c->max_write_size. Write-buffers are of
 * @c->max_write_size bytes in size for maximum performance. However, when a
 * write-buffer is synchronized, only the portion of it which contains data
 * (aligned to the @c->min_io_size boundary) is written, not the whole
 * write-buffer, because this is more space-efficient.
 *
 * This optimization adds a few complications. On the one hand, we want to
 * write in optimal @c->max_write_size chunks, which also means aligning
 * writes to @c->max_write_size offsets. On the other hand, we do not want to
 * waste space when synchronizing the write-buffer, so during synchronization
 * we write in smaller chunks, which may leave the next write offset
 * unaligned to @c->max_write_size. In that case the write-buffer size
 * (@wbuf->size) is temporarily shrunk so that @wbuf->offs becomes aligned to
 * @c->max_write_size again after the next flush.
 *
 * Write-buffers are described by &struct ubifs_wbuf objects and protected by
 * the mutexes embedded in them. Since upper-level code sometimes has to lock
 * the write-buffer itself (e.g., the journal space reservation code), many
 * of the I/O functions below require that the write-buffer lock is already
 * held, hence the "_nolock" suffixes.
 *
 * UBIFS stores nodes at 64-bit aligned addresses. If a node length is not
 * 8-byte aligned, the next node starts at the next aligned address, while
 * the common header always stores the real (unaligned) node length.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= SB_RDONLY;
		ubifs_warn(c, "switched to read-only mode, error %d", err);
		dump_stack();
	}
}

/*
 * Below are simple wrappers over UBI volume read/write/erase operations which
 * add error reporting and switch UBIFS to read-only mode on write failures.
 */

int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if the
	 * @even_ebadmsg is true.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len);
	if (err) {
		ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len);
	else
		err = dbg_leb_change(c, lnum, buf, len);
	if (err) {
		ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
	else
		err = dbg_leb_unmap(c, lnum);
	if (err) {
		ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum);
	else
		err = dbg_leb_map(c, lnum);
	if (err) {
		ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dump_stack();
	}
	return err;
}
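
/*
 * Illustrative sketch (not part of the original file): a caller which can
 * recover from ECC errors itself passes zero for @even_ebadmsg so that
 * %-EBADMSG stays silent, while fatal read paths pass one to get the error
 * logged. The variables below are assumed to exist in the caller:
 *
 *	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
 *	if (err && err != -EBADMSG)
 *		return err;	// real I/O error
 *	// on -EBADMSG the caller may still try to use or recover the data
 */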

/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks node magic number and CRC checksum. This function also
 * validates node length to prevent UBIFS from becoming crazy when an attacker
 * feeds it a file-system image with incorrect nodes. For example, too large
 * node length in the common header could cause UBIFS to read memory outside
 * of allocated buffer when checking the CRC checksum.
 *
 * This function may skip data nodes CRC checking if @c->no_chk_data_crc is
 * true, which is controlled by corresponding UBIFS mount option. However, if
 * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
 * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are
 * mounting or re-mounting to R/W mode), CRC is checked even if
 * @c->no_chk_data_crc is true.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int len,
		     int lnum, int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err(c, "bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err(c, "bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err(c, "bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf, len);
		dump_stack();
	}
	return err;
}
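
/*
 * Illustrative sketch (not part of the original file): the common
 * read-then-validate pattern, as used by ubifs_read_node() below. 'c', 'buf',
 * 'lnum', 'offs' and 'len' are assumed to come from the caller:
 *
 *	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
 *	if (err && err != -EBADMSG)
 *		return err;
 *	err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
 *	if (err)
 *		return err;	// -EUCLEAN means corrupted node
 */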

/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of %c->min_io_size and
 * when we have to write less data we add a padding node to the write-buffer
 * and pad it to the next minimal I/O unit's boundary. Padding nodes help when
 * the media is being scanned. If the amount of wasted space is not enough to
 * fit a padding node, which takes %UBIFS_PAD_NODE_SZ bytes, we write the
 * padding bytes pattern (%UBIFS_PADDING_BYTE) instead.
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(c, pad >= 0);

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}
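
/*
 * Illustrative sketch (not part of the original file): how the amount of
 * padding is typically computed, mirroring ubifs_wbuf_sync_nolock() below.
 * 'wbuf' and 'c' are assumed to exist in the caller:
 *
 *	sync_len = ALIGN(wbuf->used, c->min_io_size);
 *	dirt = sync_len - wbuf->used;	// bytes to fill with padding
 *	if (dirt)
 *		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
 */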

/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn(c, "running out of sequence numbers, end of life soon");
	}

	return sqnum;
}

void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
{
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}

void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
{
	struct ubifs_ch *ch = node;
	uint32_t crc;

	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * ubifs_prepare_node_hmac - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @hmac_offs: offset of the HMAC in the node
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero. If @hmac_offs is positive
 * then a HMAC is inserted into the node at the given offset.
 *
 * This function returns 0 for success or a negative error code otherwise.
 */
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
			    int hmac_offs, int pad)
{
	int err;

	ubifs_init_node(c, node, len, pad);

	if (hmac_offs > 0) {
		err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
		if (err)
			return err;
	}

	ubifs_crc_node(c, node, len);

	return 0;
}

/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero.
 */
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	/*
	 * Deliberately ignore the return value: with a zero @hmac_offs no
	 * HMAC is inserted, so ubifs_prepare_node_hmac() cannot fail here.
	 */
	ubifs_prepare_node_hmac(c, node, len, 0, pad);
}
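
/*
 * Illustrative sketch (not part of the original file): a journal-style caller
 * prepares a node and then pushes it through a write-buffer. 'c', 'wbuf',
 * 'node' and 'len' are assumed; see ubifs_wbuf_write_nolock() below:
 *
 *	ubifs_prepare_node(c, node, len, 0);
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *	mutex_unlock(&wbuf->io_mutex);
 */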

/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC and fills the common header.
 */
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}

/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer descriptor
 */
static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
	unsigned long long delta = dirty_writeback_interval;

	/* centi to milli, milli to nano, then 10% */
	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;

	ubifs_assert(c, !hrtimer_active(&wbuf->timer));
	ubifs_assert(c, delta <= ULONG_MAX);

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
			       HRTIMER_MODE_REL);
}

/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on the @c->min_io_size boundary) is
 * synchronized. This way we waste less flash space.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, !(wbuf->avail & 7));
	ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer but write only the minimum
	 * necessary amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to optimize writes and make sure we write in
	 * @c->max_write_size chunks and to @c->max_write_size-aligned offsets.
	 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
	 * sure that @wbuf->offs + @wbuf->size is aligned to
	 * @c->max_write_size, so that after the next write-buffer flush we
	 * are again at the optimal offset (aligned to @c->max_write_size).
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
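
/*
 * Illustrative sketch (not part of the original file): callers synchronize a
 * write-buffer with its I/O mutex held, as ubifs_bg_wbufs_sync() does below.
 * 'wbuf' is assumed to be a valid journal head write-buffer:
 *
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_sync_nolock(wbuf);
 *	mutex_unlock(&wbuf->io_mutex);
 */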

/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
	ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(c, lnum != wbuf->lnum);
	ubifs_assert(c, wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);

	return 0;
}

/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by background thread to synchronize write-buffers.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err(c, "cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}

/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take a whole max. write unit (@c->max_write_size). Instead, the
 * node will sit in RAM until the write-buffer is synchronized (e.g., by
 * timer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node does not fit in the remaining space of the
 * logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, n, written = 0, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very big and fits entirely within
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);
		if (aligned_len > len) {
			ubifs_assert(c, aligned_len - len < 8);
			ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
		}

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	if (wbuf->used) {
		/*
		 * The node is large enough and does not fit entirely within
		 * the current available space. We have to fill and flush the
		 * write-buffer and switch to the next max. write unit.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure
		 * the following writes are done in optimal @c->max_write_size
		 * chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may take more space than what is left in the
	 * write-buffer, so direct writes are needed.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len) {
		/*
		 * And now we have what's left and what does not take a whole
		 * max. write unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);
		if (aligned_len > len) {
			ubifs_assert(c, aligned_len - len < 8);
			ubifs_pad(c, wbuf->buf + len, aligned_len - len);
		}
	}

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(c, wbuf);

	return 0;

out:
	ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf, written + len);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}
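
/*
 * Worked example (illustrative, with assumed geometry, not part of the
 * original file): with c->max_write_size = 2048 (c->max_write_shift = 11),
 * wbuf->size = 2048 and wbuf->used = 1024, writing a node of len = 3000
 * proceeds as follows:
 *
 *	- the first wbuf->avail = 1024 bytes fill the buffer, which is then
 *	  flushed as one 2048-byte write, leaving 1976 bytes;
 *	- 1976 >> 11 == 0, so no direct @c->max_write_size chunks are written;
 *	- the remaining 1976 bytes stay in the write-buffer
 *	  (wbuf->used = 1976, wbuf->avail = 72) until it is synchronized.
 */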

/**
 * ubifs_write_node_hmac - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @hmac_offs: offset of the HMAC within the node
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
			  int offs, int hmac_offs)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
	if (err)
		return err;

	err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
	if (err)
		ubifs_dump_node(c, buf, len);

	return err;
}

/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs)
{
	return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
}

/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes data from the buffer, otherwise it reads the flash media.
 * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
 * error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
	ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err(c, "bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
	if (err) {
		ubifs_err(c, "expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err(c, "bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf, len);
	dump_stack();
	return -EINVAL;
}

/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
	ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	if (type != ch->node_type) {
		ubifs_errc(c, "bad node type (%d but expected %d)",
			   ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
	if (err) {
		ubifs_errc(c, "expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_errc(c, "bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
		   offs, ubi_is_mapped(c->ubi, lnum));
	if (!c->probing) {
		ubifs_dump_node(c, buf, len);
		dump_stack();
	}
	return -EINVAL;
}

/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

	size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	/*
	 * If the LEB starts at the max. write size aligned address, then
	 * write-buffer size has to be set to @c->max_write_size. Otherwise,
	 * it has to be aligned to the closest max. write size boundary.
	 */
	size = c->max_write_size - (c->leb_start % c->max_write_size);
	wbuf->avail = wbuf->size = size;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	return 0;
}
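
/*
 * Illustrative sketch (not part of the original file): initializing a
 * write-buffer and pointing it at the start of a LEB, assuming 'c' is a
 * mounted file-system description object, 'wbuf' is zeroed and 'lnum' is a
 * mapped, empty LEB:
 *
 *	err = ubifs_wbuf_init(c, wbuf);
 *	if (err)
 *		return err;
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_seek_nolock(wbuf, lnum, 0);
 *	mutex_unlock(&wbuf->io_mutex);
 */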

/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* NOR flash or something similar */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}

/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}

/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * head contains something related to this inode, it
			 * is a _copy_ of the corresponding on-flash node
			 * which sits somewhere else.
			 */
			continue;

		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}