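// SPDX-License-Identifier: GPL-2.0+
/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */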
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>
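
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */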
struct nilfs_sufile_info {
        struct nilfs_mdt_info mi;
        unsigned long ncleansegs;
        __u64 allocmin;
        __u64 allocmax;
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
        return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* block offset within sufile that holds the usage entry of @segnum */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

/* index of the usage entry of @segnum within its sufile block */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
                                                   __u64 segnum)
{
        return nilfs_mdt_delete_block(sufile,
                                      nilfs_sufile_get_blkoff(sufile, segnum));
}

/* adjust the clean/dirty segment counters in the sufile header block */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
}
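
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */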
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
        return NILFS_SUI(sufile)->ncleansegs;
}
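
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which a target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */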
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        nilfs_warn(sufile->i_sb,
                                   "%s: invalid segment number: %llu",
                                   __func__, (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);
 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
        if (ndone)
                *ndone = n;
        return ret;
}

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
                           __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
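
/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */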
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        __u64 nsegs;
        int ret = -ERANGE;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        nsegs = nilfs_sufile_get_nsegments(sufile);

        if (start <= end && end < nsegs) {
                sui->allocmin = start;
                sui->allocmax = end;
                ret = 0;
        }
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
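
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */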
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, nsus, cnt;
        int ret, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        maxsegnum = sui->allocmax;
        segnum = last_alloc + 1;
        if (segnum < sui->allocmin || segnum > sui->allocmax)
                segnum = sui->allocmin;

        for (cnt = 0; cnt < nsegments; cnt += nsus) {
                if (segnum > maxsegnum) {
                        if (cnt < sui->allocmax - sui->allocmin + 1) {
                                /*
                                 * wrap around in the limited region.
                                 * if allocation started from
                                 * sui->allocmin, this never happens.
                                 */
                                segnum = sui->allocmin;
                                maxsegnum = last_alloc;
                        } else if (segnum > sui->allocmin &&
                                   sui->allocmax + 1 < nsegments) {
                                segnum = sui->allocmax + 1;
                                maxsegnum = nsegments - 1;
                        } else if (sui->allocmin > 0) {
                                segnum = 0;
                                maxsegnum = sui->allocmin - 1;
                        } else {
                                break; /* never happens */
                        }
                }
                trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr);

                        kaddr = kmap_atomic(header_bh->b_page);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr);

                        sui->ncleansegs--;
                        mark_buffer_dirty(header_bh);
                        mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;

                        trace_nilfs2_segment_usage_allocated(sufile, segnum);

                        goto out_header;
                }

                kunmap_atomic(kaddr);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
                           __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
                           __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr);
        mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;

        nilfs_mdt_mark_dirty(sufile);

        trace_nilfs2_segment_usage_freed(sufile, segnum);
}
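
/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */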
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
        struct buffer_head *bh;
        int ret;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
                mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
        return ret;
}
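
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */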
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
                                   unsigned long nblocks, time64_t modtime)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        WARN_ON(nilfs_segment_usage_error(su));
        if (modtime)
                su->su_lastmod = cpu_to_le64(modtime);
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
        brelse(bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
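
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */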
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr);

        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}
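
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */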
static int nilfs_sufile_truncate_range(struct inode *sufile,
                                       __u64 start, __u64 end)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su, *su2;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        unsigned long segusages_per_block;
        unsigned long nsegs, ncleaned;
        __u64 segnum;
        void *kaddr;
        ssize_t n, nc;
        int ret;
        int j;

        nsegs = nilfs_sufile_get_nsegments(sufile);

        ret = -EINVAL;
        if (start > end || start >= nsegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        ncleaned = 0;

        for (segnum = start; segnum <= end; segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          end - segnum + 1);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_header;
                        /* hole */
                        continue;
                }
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                su2 = su;
                for (j = 0; j < n; j++, su = (void *)su + susz) {
                        if ((le32_to_cpu(su->su_flags) &
                             ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
                                kunmap_atomic(kaddr);
                                brelse(su_bh);
                                goto out_header;
                        }
                }
                nc = 0;
                for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
                        if (nilfs_segment_usage_error(su)) {
                                nilfs_segment_usage_set_clean(su);
                                nc++;
                        }
                }
                kunmap_atomic(kaddr);
                if (nc > 0) {
                        mark_buffer_dirty(su_bh);
                        ncleaned += nc;
                }
                brelse(su_bh);

                if (n == segusages_per_block) {
                        /* make hole */
                        nilfs_sufile_delete_segment_usage_block(sufile, segnum);
                }
        }
        ret = 0;

out_header:
        if (ncleaned > 0) {
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
                nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
                nilfs_mdt_mark_dirty(sufile);
        }
        brelse(header_bh);
out:
        return ret;
}
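
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */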
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        void *kaddr;
        unsigned long nsegs, nrsvsegs;
        int ret = 0;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        nsegs = nilfs_sufile_get_nsegments(sufile);
        if (nsegs == newnsegs)
                goto out;

        ret = -ENOSPC;
        nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
        if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        if (newnsegs > nsegs) {
                sui->ncleansegs += newnsegs - nsegs;
        } else {
                ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
                if (ret < 0)
                        goto out_header;

                sui->ncleansegs -= nsegs - newnsegs;
        }

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
        nilfs_set_nsegments(nilfs, newnsegs);

out_header:
        brelse(header_bh);
out:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
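
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Return Value: On success, the number of segment usage structs stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */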
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned int sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        BIT(NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
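
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * member are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */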
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
                                unsigned int supsz, size_t nsup)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh, *bh;
        struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
        struct nilfs_segment_usage *su;
        void *kaddr;
        unsigned long blkoff, prev_blkoff;
        int cleansi, cleansu, dirtysi, dirtysu;
        long ncleaned = 0, ndirtied = 0;
        int ret = 0;

        if (unlikely(nsup == 0))
                return ret;

        for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
                if (sup->sup_segnum >= nilfs->ns_nsegments
                        || (sup->sup_flags &
                                (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
                        || (nilfs_suinfo_update_nblocks(sup) &&
                                sup->sup_sui.sui_nblocks >
                                nilfs->ns_blocks_per_segment))
                        return -EINVAL;
        }

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        sup = buf;
        blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
        ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                kaddr = kmap_atomic(bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, sup->sup_segnum, bh, kaddr);

                if (nilfs_suinfo_update_lastmod(sup))
                        su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

                if (nilfs_suinfo_update_nblocks(sup))
                        su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

                if (nilfs_suinfo_update_flags(sup)) {
                        /*
                         * Active flag is a virtual flag projected by running
                         * nilfs kernel code - drop it not to write it to
                         * disk.
                         */
                        sup->sup_sui.sui_flags &=
                                        ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

                        cleansi = nilfs_suinfo_clean(&sup->sup_sui);
                        cleansu = nilfs_segment_usage_clean(su);
                        dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
                        dirtysu = nilfs_segment_usage_dirty(su);

                        if (cleansi && !cleansu)
                                ++ncleaned;
                        else if (!cleansi && cleansu)
                                --ncleaned;

                        if (dirtysi && !dirtysu)
                                ++ndirtied;
                        else if (!dirtysi && dirtysu)
                                --ndirtied;

                        su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
                }

                kunmap_atomic(kaddr);

                sup = (void *)sup + supsz;
                if (sup >= supend)
                        break;

                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                mark_buffer_dirty(bh);
                put_bh(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_mark;
        }
        mark_buffer_dirty(bh);
        put_bh(bh);

 out_mark:
        if (ncleaned || ndirtied) {
                nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
                                (u64)ndirtied);
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
        }
        nilfs_mdt_mark_dirty(sufile);
 out_header:
        put_bh(header_bh);
 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
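
/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len.  start is rounded up to the next block
 * boundary and start+len is rounded down.  For each clean segment
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned or negative error code, otherwise.
 */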
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
        sector_t seg_start, seg_end, start_block, end_block;
        sector_t start = 0, nblocks = 0;
        u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
        int ret = 0;
        unsigned int sects_per_block;

        sects_per_block = (1 << nilfs->ns_blocksize_bits) /
                        bdev_logical_block_size(nilfs->ns_bdev);
        len = range->len >> nilfs->ns_blocksize_bits;
        minlen = range->minlen >> nilfs->ns_blocksize_bits;
        max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

        if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
                return -EINVAL;

        start_block = (range->start + nilfs->ns_blocksize - 1) >>
                        nilfs->ns_blocksize_bits;

        /*
         * range->len can be very large (actually, it is set to
         * ULLONG_MAX by default) - truncate upper end of the range
         * carefully so as not to overflow.
         */
        if (max_blocks - start_block < len)
                end_block = max_blocks - 1;
        else
                end_block = start_block + len - 1;

        segnum = nilfs_get_segnum_of_block(nilfs, start_block);
        segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

        down_read(&NILFS_MDT(sufile)->mi_sem);

        while (segnum <= segnum_end) {
                n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
                                segnum_end);

                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_sem;
                        /* hole */
                        segnum += n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
                                su_bh, kaddr);
                for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;

                        nilfs_get_segment_range(nilfs, segnum, &seg_start,
                                                &seg_end);

                        if (!nblocks) {
                                /* start a new extent */
                                start = seg_start;
                                nblocks = seg_end - seg_start + 1;
                                continue;
                        }

                        if (start + nblocks == seg_start) {
                                /* add to previous extent */
                                nblocks += seg_end - seg_start + 1;
                                continue;
                        }

                        /* discard previous extent */
                        if (start < start_block) {
                                nblocks -= start_block - start;
                                start = start_block;
                        }

                        if (nblocks >= minlen) {
                                kunmap_atomic(kaddr);

                                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                start * sects_per_block,
                                                nblocks * sects_per_block,
                                                GFP_NOFS, 0);
                                if (ret < 0) {
                                        put_bh(su_bh);
                                        goto out_sem;
                                }

                                ndiscarded += nblocks;
                                kaddr = kmap_atomic(su_bh->b_page);
                                su = nilfs_sufile_block_get_segment_usage(
                                        sufile, segnum, su_bh, kaddr);
                        }

                        /* start a new extent */
                        start = seg_start;
                        nblocks = seg_end - seg_start + 1;
                }
                kunmap_atomic(kaddr);
                put_bh(su_bh);
        }

        if (nblocks) {
                /* discard last extent */
                if (start < start_block) {
                        nblocks -= start_block - start;
                        start = start_block;
                }
                if (start + nblocks > end_block + 1)
                        nblocks = end_block - start + 1;

                if (nblocks >= minlen) {
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                        start * sects_per_block,
                                        nblocks * sects_per_block,
                                        GFP_NOFS, 0);
                        if (!ret)
                                ndiscarded += nblocks;
                }
        }

out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);

        range->len = ndiscarded << nilfs->ns_blocksize_bits;
        return ret;
}
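
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */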
int nilfs_sufile_read(struct super_block *sb, size_t susize,
                      struct nilfs_inode *raw_inode, struct inode **inodep)
{
        struct inode *sufile;
        struct nilfs_sufile_info *sui;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        void *kaddr;
        int err;

        if (susize > sb->s_blocksize) {
                nilfs_err(sb, "too large segment usage size: %zu bytes",
                          susize);
                return -EINVAL;
        } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
                nilfs_err(sb, "too small segment usage size: %zu bytes",
                          susize);
                return -EINVAL;
        }

        sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
        if (unlikely(!sufile))
                return -ENOMEM;
        if (!(sufile->i_state & I_NEW))
                goto out;

        err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
        if (err)
                goto failed;

        nilfs_mdt_set_entry_size(sufile, susize,
                                 sizeof(struct nilfs_sufile_header));

        err = nilfs_read_inode_common(sufile, raw_inode);
        if (err)
                goto failed;

        err = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (err)
                goto failed;

        sui = NILFS_SUI(sufile);
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        kunmap_atomic(kaddr);
        brelse(header_bh);

        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
        sui->allocmin = 0;

        unlock_new_inode(sufile);
 out:
        *inodep = sufile;
        return 0;
 failed:
        iget_failed(sufile);
        return err;
}