/* linux/fs/xfs/xfs_itable.c */
   1/*
   2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_inum.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_mount.h"
  28#include "xfs_inode.h"
  29#include "xfs_btree.h"
  30#include "xfs_ialloc.h"
  31#include "xfs_ialloc_btree.h"
  32#include "xfs_itable.h"
  33#include "xfs_error.h"
  34#include "xfs_trace.h"
  35#include "xfs_icache.h"
  36#include "xfs_dinode.h"
  37
  38STATIC int
  39xfs_internal_inum(
  40        xfs_mount_t     *mp,
  41        xfs_ino_t       ino)
  42{
  43        return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
  44                (xfs_sb_version_hasquota(&mp->m_sb) &&
  45                 xfs_is_quota_inode(&mp->m_sb, ino)));
  46}
  47
  48/*
  49 * Return stat information for one inode.
  50 * Return 0 if ok, else errno.
  51 */
/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 *
 * Grabs the inode via xfs_iget() with ILOCK_SHARED, copies the on-disk
 * core fields into a temporary xfs_bstat buffer, drops the lock and
 * inode reference, and only then runs @formatter to copy the result to
 * the user buffer (so we never touch userspace while holding the ILOCK).
 * *stat reports BULKSTAT_RV_DIDONE on success, BULKSTAT_RV_NOTHING if
 * this inode produced no output.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	/* Filesystem-internal metadata inodes are never reported. */
	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	/*
	 * UNTRUSTED: the inode number may have come from userspace, so it
	 * must be validated against the inode btree.  DONTCACHE: don't let
	 * a bulk scan evict the working set from the inode cache.
	 */
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	/* di_extsize is stored in filesystem blocks; report bytes. */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/*
	 * rdev/blksize/blocks depend on the data fork format: device
	 * special files carry an rdev and no blocks; inline (local) and
	 * uuid formats occupy no blocks either; only extent/btree files
	 * have real block counts (including delayed allocations).
	 */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	/* Release the inode before copying to userspace (may fault/sleep). */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);

	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}
 147
 148/* Return 0 on success or positive error */
 149STATIC int
 150xfs_bulkstat_one_fmt(
 151        void                    __user *ubuffer,
 152        int                     ubsize,
 153        int                     *ubused,
 154        const xfs_bstat_t       *buffer)
 155{
 156        if (ubsize < sizeof(*buffer))
 157                return XFS_ERROR(ENOMEM);
 158        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
 159                return XFS_ERROR(EFAULT);
 160        if (ubused)
 161                *ubused = sizeof(*buffer);
 162        return 0;
 163}
 164
/*
 * Return stat information for one inode, copied straight to the user
 * buffer in xfs_bstat format.  Thin wrapper binding the default
 * formatter to xfs_bulkstat_one_int(); also used as the per-inode
 * callback for xfs_bulkstat().
 */
int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}
 177
/*
 * True if the remaining user buffer can hold at least one more stat
 * structure.  NOTE: deliberately captures the local variable
 * statstruct_size of the function it is expanded in (xfs_bulkstat),
 * so it is only usable there.
 */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
 179
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Two-phase design per allocation group: first, with the AGI buffer
 * held, walk the inode btree and gather allocated-inode chunk records
 * into irbuf (issuing cluster readahead as we go); second, drop the
 * btree cursor and AGI buffer — we cannot hold them across xfs_iget()
 * — and format each allocated inode in the gathered chunks into the
 * user's buffer via @formatter.
 *
 * On return *lastinop is the last inode processed (the resume point
 * for the next call), *ubcountp the number of records written, and
 * *done is set once the whole filesystem has been traversed.  If some
 * records were produced, a late error is suppressed (rval forced to 0)
 * and will be reported on the next call instead.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror;/* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			blks_per_cluster; /* # of blocks per cluster */
	int			inodes_per_cluster;/* # of inodes per cluster */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	/* Grab as much memory as the allocator will cheaply give us. */
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
					/* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
					/* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
					/* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
					/* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.  Skip ahead one chunk at a time,
			 * retrying the lookup until we run off the end
			 * of the AG's inode space.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				struct blk_plug plug;
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.  Plugging batches
				 * the readahead requests into one submission.
				 */
				blk_start_plug(&plug);
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += inodes_per_cluster,
				     agbno += blks_per_cluster) {
					if (xfs_inobt_maskn(chunkidx,
					    inodes_per_cluster) & ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, blks_per_cluster,
							&xfs_inode_buf_ops);
				}
				blk_finish_plug(&plug);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					/*
					 * ENOENT (raced with unlink) and
					 * EINVAL (internal inode) are
					 * expected; skip the inode.  Any
					 * other error aborts the scan.
					 */
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					/* Formatter ran out of buffer space. */
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}
		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
 514
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 *
 * Fast path: stat the requested inode directly via xfs_bulkstat_one().
 * If that fails (e.g. the number doesn't map to an allocated inode),
 * fall back to a one-entry xfs_bulkstat() starting just before it;
 * the btree walk only returns inodes that are actually allocated, so
 * this validates the number.  If the walk doesn't land exactly on the
 * requested inode, the original fast-path error is returned
 * (EFSCORRUPTED is translated to EINVAL for userspace).
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to return */
	char			__user *buffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* filesystem inode number */
	int			res;	/* result from bs1 */

	/*
	 * note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_imap_to_bp to generate warning
	 * messages about bad magic numbers. This is ok. The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below. Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = *lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				sizeof(xfs_bstat_t), buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}
 562
 563int
 564xfs_inumbers_fmt(
 565        void                    __user *ubuffer, /* buffer to write to */
 566        const xfs_inogrp_t      *buffer,        /* buffer to read from */
 567        long                    count,          /* # of elements to read */
 568        long                    *written)       /* # of bytes written */
 569{
 570        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
 571                return -EFAULT;
 572        *written = count * sizeof(*buffer);
 573        return 0;
 574}
 575
 576/*
 577 * Return inode number table for the filesystem.
 578 */
/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btree of each AG starting at *lastino,
 * batching up to one page worth of xfs_inogrp records (one record per
 * inode chunk: start inode, allocated count, allocation mask) and
 * flushing them to the user buffer through @formatter whenever the
 * batch fills, plus a final flush at the end.  *count is updated to
 * the number of records delivered and *lastino to the resume point.
 * AGs whose AGI can't be read, and chunks that fail lookup, are
 * skipped rather than aborting the walk.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf formatter)
{
	xfs_buf_t	*agbp;		/* AGI header buffer (NULL = none held) */
	xfs_agino_t	agino;		/* inode # within the current AG */
	xfs_agnumber_t	agno;		/* current allocation group */
	int		bcount;		/* capacity of the batch buffer */
	xfs_inogrp_t	*buffer;	/* batch of records awaiting copy-out */
	int		bufidx;		/* records currently in the batch */
	xfs_btree_cur_t	*cur;		/* inobt cursor (NULL = none held) */
	int		error;
	xfs_inobt_rec_incore_t r;	/* current inode chunk record */
	int		i;		/* get_rec "record found" flag */
	xfs_ino_t	ino;
	int		left;		/* records still wanted by caller */
	int		tmp;		/* scratch stat from btree calls */

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* Batch at most a page of records at a time. */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		/* agbp == NULL means we need to (re)position in this AG. */
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			/* End of this AG (or read error): move to the next. */
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		/* Record the chunk and remember the last inode it covers. */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		/* Flush a full batch to userspace. */
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		/* Flush any partial batch and record the resume point. */
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}
 705