linux/fs/nilfs2/sufile.c
/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"


static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

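/*
 * Editor's note (illustrative, not part of the original nilfs2 source):
 * the helpers above split a segment number into a sufile block index and
 * an in-block entry index:
 *
 *         blkoff = (segnum + mi_first_entry_offset) / mi_entries_per_block
 *         offset = (segnum + mi_first_entry_offset) % mi_entries_per_block
 *
 * do_div() divides in place and returns the remainder, which is why
 * nilfs_sufile_get_blkoff() returns t while nilfs_sufile_get_offset()
 * returns the do_div() result directly.  As a worked example with assumed
 * values (256 entries per block, first entry offset 2), segment 1000 maps
 * to sufile block (1000 + 2) / 256 = 3 and entry index (1000 + 2) % 256 =
 * 234 within that block.
 */
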
static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
                              struct buffer_head *bh,
                              void *kaddr)
{
        return kaddr + bh_offset(bh);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(header_bh);
}

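/*
 * Editor's note (illustrative, not part of the original nilfs2 source):
 * the counter deltas are declared u64 but are used as signed values; the
 * callers in this file express a decrement as (u64)-1, which
 * le64_add_cpu() applies with two's-complement wrap-around, e.g.:
 *
 *         nilfs_sufile_mod_counter(header_bh, -1, 1);       (clean--, dirty++)
 *         nilfs_sufile_mod_counter(header_bh, 1, (u64)-1);  (clean++, dirty--)
 */
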
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments in @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        printk(KERN_WARNING
                               "%s: invalid segment number: %llu\n", __func__,
                               (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);
 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
        if (ndone)
                *ndone = n;
        return ret;
}

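/*
 * Editor's illustrative sketch (not part of the original nilfs2 source):
 * a minimal batch caller of nilfs_sufile_updatev() that frees a vector of
 * segments through nilfs_sufile_do_free(), which is defined later in this
 * file and assumed to be declared in sufile.h.  The function name below
 * is hypothetical.
 */
static int example_sufile_free_segments(struct inode *sufile, __u64 *segnumv,
                                        size_t nsegs)
{
        size_t ndone;
        int ret;

        /* create == 0: freeing must not instantiate missing sufile blocks */
        ret = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
                                   nilfs_sufile_do_free);
        if (ret < 0)
                printk(KERN_WARNING "%s: freed only %zu of %zu segments\n",
                       __func__, ndone, nsegs);
        return ret;
}
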
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, ncleansegs, nsus;
        int ret, i, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
        ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr, KM_USER0);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        segnum = last_alloc + 1;
        maxsegnum = nsegments - 1;
        for (i = 0; i < nsegments; i += nsus) {
                if (segnum >= nsegments) {
                        /* wrap around */
                        segnum = 0;
                        maxsegnum = last_alloc;
                }
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr, KM_USER0);

                        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                        header = nilfs_sufile_block_get_header(
                                sufile, header_bh, kaddr);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr, KM_USER0);

                        nilfs_mdt_mark_buffer_dirty(header_bh);
                        nilfs_mdt_mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
                        goto out_header;
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

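/*
 * Editor's illustrative sketch (not part of the original nilfs2 source):
 * a minimal caller of nilfs_sufile_alloc().  The function name below is
 * hypothetical; a real caller such as the segment constructor would go on
 * to write log blocks into the returned segment.
 */
static int example_sufile_reserve_segment(struct inode *sufile,
                                          __u64 *segnump)
{
        int err = nilfs_sufile_alloc(sufile, segnump);

        if (err == -ENOSPC)
                printk(KERN_DEBUG "%s: no clean segments left\n", __func__);
        return err;
}
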
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
        nilfs_mdt_mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the places pointed to by @sup and @bhp, respectively. On error, one of
 * the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
                                   struct nilfs_segment_usage **sup,
                                   struct buffer_head **bhp)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        /* segnum is 0 origin */
        if (segnum >= nilfs_sufile_get_nsegments(sufile))
                return -EINVAL;
        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap(bh->b_page);
                brelse(bh);
                ret = -EINVAL;
                goto out_sem;
        }

        if (sup != NULL)
                *sup = su;
        *bhp = bh;

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which has been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
                                    struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}

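/*
 * Editor's illustrative sketch (not part of the original nilfs2 source):
 * reading one field of a segment usage entry through the get/put pair
 * above.  The function name below is hypothetical.
 */
static int example_sufile_read_nblocks(struct inode *sufile, __u64 segnum,
                                       __u32 *nblocksp)
{
        struct nilfs_segment_usage *su;
        struct buffer_head *bh;
        int err;

        err = nilfs_sufile_get_segment_usage(sufile, segnum, &su, &bh);
        if (err < 0)
                return err;
        /* the page stays kmapped until the matching put call */
        *nblocksp = le32_to_cpu(su->su_nblocks);
        nilfs_sufile_put_segment_usage(sufile, segnum, bh);
        return 0;
}
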
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed to by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

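/*
 * Editor's illustrative sketch (not part of the original nilfs2 source):
 * deriving the number of in-use segments from nilfs_sufile_get_stat().
 * The function name below is hypothetical.
 */
static int example_sufile_count_busy_segments(struct inode *sufile,
                                              unsigned long long *nbusyp)
{
        struct nilfs_sustat sustat;
        int err;

        err = nilfs_sufile_get_stat(sufile, &sustat);
        if (!err)
                *nbusyp = sustat.ss_nsegs - sustat.ss_ncleansegs;
        return err;
}
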
/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments.
 *
 * Return Value: On success, 0 is returned and the number of clean segments is
 * stored in the place pointed to by @nsegsp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
        struct nilfs_sustat sustat;
        int ret;

        ret = nilfs_sufile_get_stat(sufile, &sustat);
        if (ret == 0)
                *nsegsp = sustat.ss_ncleansegs;
        return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr, KM_USER0);

        if (suclean)
                nilfs_sufile_mod_counter(header_bh, -1, 0);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies segment usage information,
 * starting at @segnum, into the @buf array of suinfo structures.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                                  nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

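/*
 * Editor's illustrative sketch (not part of the original nilfs2 source):
 * walking every segment with nilfs_sufile_get_suinfo() in fixed-size
 * batches, roughly the way an ioctl handler would.  The function name
 * below is hypothetical.
 */
static ssize_t example_sufile_count_error_segments(struct inode *sufile)
{
        struct nilfs_suinfo si[16];
        __u64 segnum = 0;
        __u64 nsegments = nilfs_sufile_get_nsegments(sufile);
        ssize_t n, nerrors = 0;
        int i;

        while (segnum < nsegments) {
                n = nilfs_sufile_get_suinfo(sufile, segnum, si, sizeof(si[0]),
                                            ARRAY_SIZE(si));
                if (n < 0)
                        return n;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++)
                        if (si[i].sui_flags &
                            (1UL << NILFS_SEGMENT_USAGE_ERROR))
                                nerrors++;
                segnum += n;
        }
        return nerrors;
}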