1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/string.h>
27#include <linux/buffer_head.h>
28#include <linux/errno.h>
29#include <linux/nilfs2_fs.h>
30#include "mdt.h"
31#include "sufile.h"
32
33
/*
 * struct nilfs_sufile_info - in-memory private data of the sufile
 * @mi: generic metadata-file state (must stay first: NILFS_SUI() casts
 *      the mdt info pointer to this structure)
 * @ncleansegs: cached number of clean segments, kept in sync with the
 *      on-disk header's sh_ncleansegs by the update primitives below
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;
};
38
/* Return the sufile-specific private data embedded in the mdt info. */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(sufile);

	return (struct nilfs_sufile_info *)mi;
}
43
44static inline unsigned long
45nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
46{
47 return NILFS_MDT(sufile)->mi_entries_per_block;
48}
49
50static unsigned long
51nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
52{
53 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
54 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
55 return (unsigned long)t;
56}
57
58static unsigned long
59nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
60{
61 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
62 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
63}
64
65static unsigned long
66nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
67 __u64 max)
68{
69 return min_t(unsigned long,
70 nilfs_sufile_segment_usages_per_block(sufile) -
71 nilfs_sufile_get_offset(sufile, curr),
72 max - curr + 1);
73}
74
75static struct nilfs_segment_usage *
76nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
77 struct buffer_head *bh, void *kaddr)
78{
79 return kaddr + bh_offset(bh) +
80 nilfs_sufile_get_offset(sufile, segnum) *
81 NILFS_MDT(sufile)->mi_entry_size;
82}
83
/* Read the sufile header, which always lives in block 0 (the zero
 * create flag means a missing header block is an error, not created). */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
89
90static inline int
91nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
92 int create, struct buffer_head **bhp)
93{
94 return nilfs_mdt_get_block(sufile,
95 nilfs_sufile_get_blkoff(sufile, segnum),
96 create, NULL, bhp);
97}
98
/*
 * Adjust the clean/dirty segment counters in the on-disk sufile header
 * and mark the header buffer dirty.  @ncleanadd/@ndirtyadd are added
 * as 64-bit little-endian deltas (pass (u64)-1 to decrement).
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
}
113
114
115
116
117
118unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
119{
120 return NILFS_SUI(sufile)->ncleansegs;
121}
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: pass non-zero to create sufile blocks that are still holes
 * @ndone: place to store the number of modified segments (may be NULL)
 * @dofunc: primitive operation applied to each segment usage
 *
 * Calls @dofunc for each segment listed in @segnumv, handing it the
 * header block buffer and the buffer of the block containing the
 * segment's usage entry.  When consecutive segment numbers fall in the
 * same sufile block, the buffer is reused instead of being looked up
 * again.  All segment numbers are validated up front; nothing is
 * modified if any of them is out of range.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/* validate the whole array before modifying anything */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* same block: reuse the buffer */

		/* the next segment usage lives in a different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;	/* number of segments processed so far */
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
218
/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number of the segment usage to modify
 * @create: pass non-zero to create the sufile block if it is a hole
 * @dofunc: primitive operation applied to the segment usage
 *
 * Looks up the header block and the block containing @segnum's usage
 * entry, then calls @dofunc with both buffers while holding the sufile
 * semaphore for writing.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
268{
269 struct buffer_head *header_bh, *su_bh;
270 struct nilfs_sufile_header *header;
271 struct nilfs_segment_usage *su;
272 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
273 __u64 segnum, maxsegnum, last_alloc;
274 void *kaddr;
275 unsigned long nsegments, ncleansegs, nsus;
276 int ret, i, j;
277
278 down_write(&NILFS_MDT(sufile)->mi_sem);
279
280 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
281 if (ret < 0)
282 goto out_sem;
283 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
284 header = kaddr + bh_offset(header_bh);
285 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
286 last_alloc = le64_to_cpu(header->sh_last_alloc);
287 kunmap_atomic(kaddr, KM_USER0);
288
289 nsegments = nilfs_sufile_get_nsegments(sufile);
290 segnum = last_alloc + 1;
291 maxsegnum = nsegments - 1;
292 for (i = 0; i < nsegments; i += nsus) {
293 if (segnum >= nsegments) {
294
295 segnum = 0;
296 maxsegnum = last_alloc;
297 }
298 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
299 &su_bh);
300 if (ret < 0)
301 goto out_header;
302 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
303 su = nilfs_sufile_block_get_segment_usage(
304 sufile, segnum, su_bh, kaddr);
305
306 nsus = nilfs_sufile_segment_usages_in_block(
307 sufile, segnum, maxsegnum);
308 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
309 if (!nilfs_segment_usage_clean(su))
310 continue;
311
312 nilfs_segment_usage_set_dirty(su);
313 kunmap_atomic(kaddr, KM_USER0);
314
315 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
316 header = kaddr + bh_offset(header_bh);
317 le64_add_cpu(&header->sh_ncleansegs, -1);
318 le64_add_cpu(&header->sh_ndirtysegs, 1);
319 header->sh_last_alloc = cpu_to_le64(segnum);
320 kunmap_atomic(kaddr, KM_USER0);
321
322 NILFS_SUI(sufile)->ncleansegs--;
323 nilfs_mdt_mark_buffer_dirty(header_bh);
324 nilfs_mdt_mark_buffer_dirty(su_bh);
325 nilfs_mdt_mark_dirty(sufile);
326 brelse(su_bh);
327 *segnump = segnum;
328 goto out_header;
329 }
330
331 kunmap_atomic(kaddr, KM_USER0);
332 brelse(su_bh);
333 }
334
335
336 ret = -ENOSPC;
337
338 out_header:
339 brelse(header_bh);
340
341 out_sem:
342 up_write(&NILFS_MDT(sufile)->mi_sem);
343 return ret;
344}
345
/**
 * nilfs_sufile_do_cancel_free - take a clean segment back into use
 * @sufile: inode of segment usage file
 * @segnum: segment number to cancel freeing of
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing @segnum's usage entry
 *
 * Marks the segment usage dirty again and moves one segment from the
 * clean count to the dirty count.  If the entry is not clean, a
 * warning is printed and nothing is changed.  Intended to be used as a
 * @dofunc for nilfs_sufile_update()/nilfs_sufile_updatev().
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* one clean segment became dirty */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
370
/**
 * nilfs_sufile_do_scrap - reset a segment usage to "dirty and empty"
 * @sufile: inode of segment usage file
 * @segnum: segment number whose usage entry is reset
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing @segnum's usage entry
 *
 * Clears the entry's modification time and block count and leaves only
 * the DIRTY flag set, adjusting the header counters to match the
 * entry's previous state.  A no-op if the entry is already in exactly
 * that state.  Intended as a @dofunc for nilfs_sufile_update().
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* already dirty-and-empty: nothing to do */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* remember the previous state for the counter adjustment below */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* wipe the entry and mark it as a plain dirty segment */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	/* was clean: clean count drops; was not dirty: dirty count grows */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
401
/**
 * nilfs_sufile_do_free - mark a segment usage clean (free the segment)
 * @sufile: inode of segment usage file
 * @segnum: segment number to free
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing @segnum's usage entry
 *
 * Marks the entry clean and moves one segment from the dirty count to
 * the clean count.  If the entry is already clean, a warning is
 * printed and nothing is changed.  Intended as a @dofunc for
 * nilfs_sufile_update()/nilfs_sufile_updatev().
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* a segment being freed is expected to be dirty and error-free */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	/* only decrement the dirty count if the entry was actually dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}
431
432
433
434
435
436
437int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
438{
439 struct buffer_head *bh;
440 int ret;
441
442 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
443 if (!ret) {
444 nilfs_mdt_mark_buffer_dirty(bh);
445 nilfs_mdt_mark_dirty(sufile);
446 brelse(bh);
447 }
448 return ret;
449}
450
451
452
453
454
455
456
457
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (zero leaves su_lastmod unchanged)
 *
 * Return Value: zero on success, or the negative error code from the
 * block lookup.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	/* setting usage of an errored segment is unexpected */
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() fills @sustat from the sufile
 * header and the_nilfs state.
 *
 * Return Value: On success, 0 is returned, and segment usage
 * information is stored in the place pointed by @sustat.  On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is protected by ns_last_segment_lock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
535
/**
 * nilfs_sufile_do_set_error - set the error flag on a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number to flag as erroneous
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing @segnum's usage entry
 *
 * Sets the error flag on the entry; if the segment was clean, the clean
 * counters are decremented since an errored segment is no longer
 * allocatable.  A no-op if the error flag is already set.  Intended as
 * a @dofunc for nilfs_sufile_update().
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean) {
		/* the segment leaves the pool of clean segments */
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
/**
 * nilfs_sufile_get_suinfo - get information about segment usages
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo entries to fill
 * @sisz: byte size of one suinfo entry in @buf
 * @nsi: size of the suinfo array
 *
 * Description: Fills @buf with up to @nsi suinfo entries starting at
 * @segnum.  Segments whose sufile block is a hole are reported as
 * all-zero entries.  The ACTIVE flag is recomputed from the_nilfs
 * rather than taken from disk.
 *
 * Return Value: On success, the number of entries stored is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the number of segments actually remaining */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* entries to take from the current sufile block */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole block: report this range as zeroed entries */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* report the live ACTIVE state, not the on-disk bit */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
636
637
638
639
640
641
642
643
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Description: Gets the sufile inode for @sb, initializing it from
 * @raw_inode on first use and caching the clean segment count from the
 * on-disk header.  A previously initialized inode is returned as-is.
 *
 * Return Value: On success, 0 is returned and the sufile inode is
 * stored in the place pointed by @inodep.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	/* already initialized by an earlier reader: return it directly */
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	/* seed the in-memory clean segment count from the on-disk header */
	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}
690