/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of the metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;			/* lower limit of allocatable segment range */
	__u64 allocmax;			/* upper limit of allocatable segment range */
};

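/* Return the sufile-private part of the mdt inode data of @sufile. */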
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

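/* Number of segment usage entries stored in each sufile block */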
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

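/*
 * Translate a segment number into the offset of the sufile block that
 * holds its segment usage entry.
 */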
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

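/* Index of the entry for @segnum within its sufile block */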
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

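/*
 * Number of consecutive entries that can be processed in the block
 * containing @curr without passing @max (both inclusive).
 */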
static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

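/*
 * Locate the segment usage entry of @segnum inside a sufile block;
 * @kaddr must be the kmap address of the page backing @bh.
 */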
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

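/* Read in the sufile header block (block 0 of the sufile) */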
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

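/*
 * Read in the sufile block covering @segnum, creating it first if
 * @create is nonzero.
 */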
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

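/* Punch out the sufile block covering @segnum */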
static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

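/*
 * Adjust the clean/dirty segment counters in the sufile header and mark
 * the header buffer dirty; negative adjustments are passed as two's
 * complement (u64) values.
 */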
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

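/**
 * nilfs_sufile_update - update a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number to be updated
 * @create: creation flag for the segment usage block lookup
 * @dofunc: primitive operation for the update
 *
 * Single-segment counterpart of nilfs_sufile_updatev().
 */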
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

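/*
 * nilfs_sufile_do_cancel_free - cancel a deallocation by making a clean
 * segment usage dirty again; meant to be passed to nilfs_sufile_update()
 * as @dofunc.
 */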
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

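/*
 * nilfs_sufile_do_scrap - make a segment garbage, i.e. dirty with no live
 * blocks and no last modification time; meant to be passed to
 * nilfs_sufile_update() as @dofunc.
 */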
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

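/*
 * nilfs_sufile_do_free - return a segment to the pool of clean segments;
 * meant to be passed to nilfs_sufile_update() as @dofunc.
 */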
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns statistics on segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information
 * is stored in the place pointed by @sustat. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

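/*
 * nilfs_sufile_do_set_error - mark a segment usage as erroneous; meant to
 * be passed to nilfs_sufile_update() as @dofunc.
 */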
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* all the segment usages in this block were cleared */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the usage information of
 * up to @nsi segments, starting at @segnum, into the array at @buf.
 *
 * Return Value: On success, the number of segment usage entries copied is
 * returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * member are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len.  start is rounded up to the next block
 * boundary and start+len is rounded down.  For each clean segment
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned or negative error code, otherwise.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		printk(KERN_ERR
		       "NILFS: too large segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		printk(KERN_ERR
		       "NILFS: too small segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}