/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

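/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of the metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */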
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;
	__u64 allocmin;
	__u64 allocmax;
};

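/* cast a metadata-file inode to its sufile-specific private data */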
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

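/* number of segment usage entries that fit in one sufile block */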
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

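/* block offset, within the sufile, of the block holding @segnum's entry */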
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

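/* index of @segnum's usage entry within its sufile block */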
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

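/*
 * number of consecutive usage entries, starting at @curr, that reside in
 * the same sufile block, clamped to the range end @max (inclusive)
 */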
static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

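/* return a pointer to @segnum's usage entry inside the mapped block @bh */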
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

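/* get the buffer head of the sufile header block (block 0) */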
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

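/* get the buffer head of the block holding @segnum's usage entry */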
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

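/* delete the block holding @segnum's usage entry, leaving a hole */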
static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

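/*
 * adjust the clean/dirty segment counters in the header block by the
 * given (possibly negative) deltas and mark the buffer dirty
 */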
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and of a block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

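/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @create: creation flag
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_update() calls @dofunc with the header block
 * and the block containing the usage entry of @segnum, in the same way
 * as nilfs_sufile_updatev() does for each element of its array.
 */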
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump.  On
 * error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

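/*
 * sufile_update callback: cancel a free request by bringing the segment
 * usage back from the clean state to the dirty state
 */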
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

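/*
 * sufile_update callback: scrap a segment for reuse, i.e. reset its
 * usage entry to a dirty, empty state whatever its previous state was
 */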
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

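/*
 * sufile_update callback: free a segment by marking its usage entry
 * clean and adjusting the clean/dirty segment counters
 */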
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information
 * is stored in the place pointed to by @sustat.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

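/*
 * sufile_update callback: set the error flag on a segment usage; a
 * clean segment is removed from the clean segment count first
 */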
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate a range of segment usages
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking at
 * @buf: array of suinfo
 * @sisz: byte size of one suinfo entry
 * @nsi: size of suinfo array
 *
 * Return Value: On success, the number of segment usage entries stored
 * in @buf is returned.  On error, one of the following negative error
 * codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}