linux/fs/xfs/xfs_itable.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

struct xfs_bstat_chunk {
        bulkstat_one_fmt_pf     formatter;
        struct xfs_ibulk        *breq;
        struct xfs_bulkstat     *buf;
};

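/*
 * Example only: a minimal bulkstat_one_fmt_pf formatter sketch, along the
 * lines of the v5 formatter in xfs_ioctl.c.  It assumes breq->ubuffer points
 * at a userspace array of struct xfs_bulkstat and relies on the
 * xfs_ibulk_advance() helper from xfs_itable.h to bump the output cursor and
 * return -ECANCELED once the buffer is full.
 */
static int
xfs_example_bulkstat_fmt(
        struct xfs_ibulk                *breq,
        const struct xfs_bulkstat       *bstat)
{
        if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
                return -EFAULT;
        return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}
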
/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->startino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
        struct xfs_mount        *mp,
        struct user_namespace   *mnt_userns,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        struct xfs_bstat_chunk  *bc)
{
        struct user_namespace   *sb_userns = mp->m_super->s_user_ns;
        struct xfs_inode        *ip;            /* incore inode pointer */
        struct inode            *inode;
        struct xfs_bulkstat     *buf = bc->buf;
        int                     error = -EINVAL;

        if (xfs_internal_inum(mp, ino))
                goto out_advance;

        error = xfs_iget(mp, tp, ino,
                         (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
                         XFS_ILOCK_SHARED, &ip);
        if (error == -ENOENT || error == -EINVAL)
                goto out_advance;
        if (error)
                goto out;

        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
        inode = VFS_I(ip);

        /* xfs_iget returns the following without needing further change. */
        buf->bs_projectid = ip->i_projid;
        buf->bs_ino = ino;
        buf->bs_uid = from_kuid(sb_userns, i_uid_into_mnt(mnt_userns, inode));
        buf->bs_gid = from_kgid(sb_userns, i_gid_into_mnt(mnt_userns, inode));
        buf->bs_size = ip->i_disk_size;

        buf->bs_nlink = inode->i_nlink;
        buf->bs_atime = inode->i_atime.tv_sec;
        buf->bs_atime_nsec = inode->i_atime.tv_nsec;
        buf->bs_mtime = inode->i_mtime.tv_sec;
        buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
        buf->bs_ctime = inode->i_ctime.tv_sec;
        buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
        buf->bs_gen = inode->i_generation;
        buf->bs_mode = inode->i_mode;

        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize_blks = ip->i_extsize;
        buf->bs_extents = xfs_ifork_nextents(&ip->i_df);
        xfs_bulkstat_health(ip, buf);
        buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
        buf->bs_forkoff = XFS_IFORK_BOFF(ip);
        buf->bs_version = XFS_BULKSTAT_VERSION_V5;

        if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
                buf->bs_btime = ip->i_crtime.tv_sec;
                buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
                if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
                        buf->bs_cowextsize_blks = ip->i_cowextsize;
        }

        switch (ip->i_df.if_format) {
        case XFS_DINODE_FMT_DEV:
                buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
                buf->bs_blksize = BLKDEV_IOSIZE;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_LOCAL:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
                break;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        xfs_irele(ip);

        error = bc->formatter(bc->breq, buf);
        if (error == -ECANCELED)
                goto out_advance;
        if (error)
                goto out;

out_advance:
        /*
         * Advance the cursor to the inode that comes after the one we just
         * looked at.  We want the caller to move along if the bulkstat
         * information was copied successfully; if we tried to grab the inode
         * but it's no longer allocated; or if it's internal metadata.
         */
        bc->breq->startino = ino + 1;
out:
        return error;
}

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
        struct xfs_ibulk        *breq,
        bulkstat_one_fmt_pf     formatter)
{
        struct xfs_bstat_chunk  bc = {
                .formatter      = formatter,
                .breq           = breq,
        };
        int                     error;

        if (breq->mnt_userns != &init_user_ns) {
                xfs_warn_ratelimited(breq->mp,
                        "bulkstat not supported inside of idmapped mounts.");
                return -EINVAL;
        }

        ASSERT(breq->icount == 1);

        bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
                        KM_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;

        error = xfs_bulkstat_one_int(breq->mp, breq->mnt_userns, NULL,
                                     breq->startino, &bc);

        kmem_free(bc.buf);

        /*
         * If we reported one inode to userspace then we abort because we hit
         * the end of the buffer.  Don't leak that back to userspace.
         */
        if (error == -ECANCELED)
                error = 0;

        return error;
}

static int
xfs_bulkstat_iwalk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        void                    *data)
{
        struct xfs_bstat_chunk  *bc = data;
        int                     error;

        error = xfs_bulkstat_one_int(mp, bc->breq->mnt_userns, tp, ino, data);
        /* bulkstat just skips over missing inodes */
        if (error == -ENOENT || error == -EINVAL)
                return 0;
        return error;
}

/*
 * Check the incoming startino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us to
 * the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
        struct xfs_mount        *mp,
        xfs_ino_t               startino)
{
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, startino);

        return agno >= mp->m_sb.sb_agcount ||
               startino != XFS_AGINO_TO_INO(mp, agno, agino);
}

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
        struct xfs_ibulk        *breq,
        bulkstat_one_fmt_pf     formatter)
{
        struct xfs_bstat_chunk  bc = {
                .formatter      = formatter,
                .breq           = breq,
        };
        int                     error;

        if (breq->mnt_userns != &init_user_ns) {
                xfs_warn_ratelimited(breq->mp,
                        "bulkstat not supported inside of idmapped mounts.");
                return -EINVAL;
        }
        if (xfs_bulkstat_already_done(breq->mp, breq->startino))
                return 0;

        bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
                        KM_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;

        error = xfs_iwalk(breq->mp, NULL, breq->startino, breq->flags,
                        xfs_bulkstat_iwalk, breq->icount, &bc);

        kmem_free(bc.buf);

        /*
         * We found some inodes, so clear the error status and return them.
         * The lastino pointer will point directly at the inode that triggered
         * any error that occurred, so on the next call the error will be
         * triggered again and propagated to userspace as there will be no
         * formatted inodes in the buffer.
         */
        if (breq->ocount > 0)
                error = 0;

        return error;
}

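/*
 * Example only: a hypothetical in-kernel caller, sketching how struct
 * xfs_ibulk drives the walk.  Everything named xfs_example_* is illustrative
 * and not part of this file; the formatter below never touches breq->ubuffer,
 * it just counts the inodes that xfs_bulkstat reports.
 */
static int
xfs_example_count_fmt(
        struct xfs_ibulk                *breq,
        const struct xfs_bulkstat       *bstat)
{
        breq->ocount++;
        return 0;       /* never -ECANCELED, so the walk runs to the end */
}

static int
xfs_example_count_inodes(
        struct xfs_mount        *mp,
        unsigned int            *count)
{
        struct xfs_ibulk        breq = {
                .mp             = mp,
                .mnt_userns     = &init_user_ns,
                .startino       = 0,    /* first chunk of AG 0 */
                .icount         = 1024, /* passed through to xfs_iwalk above */
        };
        int                     error;

        error = xfs_bulkstat(&breq, xfs_example_count_fmt);
        *count = breq.ocount;
        return error;
}
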
/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
        struct xfs_mount                *mp,
        struct xfs_bstat                *bs1,
        const struct xfs_bulkstat       *bstat)
{
        /* memset is needed here because of padding holes in the structure. */
        memset(bs1, 0, sizeof(struct xfs_bstat));
        bs1->bs_ino = bstat->bs_ino;
        bs1->bs_mode = bstat->bs_mode;
        bs1->bs_nlink = bstat->bs_nlink;
        bs1->bs_uid = bstat->bs_uid;
        bs1->bs_gid = bstat->bs_gid;
        bs1->bs_rdev = bstat->bs_rdev;
        bs1->bs_blksize = bstat->bs_blksize;
        bs1->bs_size = bstat->bs_size;
        bs1->bs_atime.tv_sec = bstat->bs_atime;
        bs1->bs_mtime.tv_sec = bstat->bs_mtime;
        bs1->bs_ctime.tv_sec = bstat->bs_ctime;
        bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
        bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
        bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
        bs1->bs_blocks = bstat->bs_blocks;
        bs1->bs_xflags = bstat->bs_xflags;
        bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
        bs1->bs_extents = bstat->bs_extents;
        bs1->bs_gen = bstat->bs_gen;
        bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
        bs1->bs_forkoff = bstat->bs_forkoff;
        bs1->bs_projid_hi = bstat->bs_projectid >> 16;
        bs1->bs_sick = bstat->bs_sick;
        bs1->bs_checked = bstat->bs_checked;
        bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
        bs1->bs_dmevmask = 0;
        bs1->bs_dmstate = 0;
        bs1->bs_aextents = bstat->bs_aextents;
}

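/*
 * Example only: a sketch of how a v1 compat formatter might use the helper
 * above, in the spirit of the xfs_fsbulkstat handling in xfs_ioctl.c.  The
 * function name is hypothetical, and treating breq->ubuffer as a userspace
 * array of struct xfs_bstat is an assumption of this sketch.
 */
static int
xfs_example_fsbulkstat_fmt(
        struct xfs_ibulk                *breq,
        const struct xfs_bulkstat       *bstat)
{
        struct xfs_bstat        bs1;

        xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
        if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
                return -EFAULT;
        return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}
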
struct xfs_inumbers_chunk {
        inumbers_fmt_pf         formatter;
        struct xfs_ibulk        *breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

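/*
 * Example only: a hypothetical helper that decodes one exported record
 * instead of copying it out, to show what the fields mean.  Each record
 * covers a chunk of XFS_INODES_PER_CHUNK inode numbers starting at
 * xi_startino; set bits in xi_allocmask mark inodes that are in use, and
 * xi_alloccount is the number of inodes in use in the chunk.
 */
static void
xfs_example_print_inumbers(
        struct xfs_mount                *mp,
        const struct xfs_inumbers       *igrp)
{
        xfs_info(mp, "inode chunk at %llu: %u in use (allocmask 0x%llx)",
                        (unsigned long long)igrp->xi_startino,
                        igrp->xi_alloccount,
                        (unsigned long long)igrp->xi_allocmask);
}
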
/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, startino is the inode cursor as we walk
 * through the filesystem so we move it forward unless there was a runtime
 * error.  If the formatter tells us the buffer is now full we also move the
 * cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        const struct xfs_inobt_rec_incore *irec,
        void                    *data)
{
        struct xfs_inumbers     inogrp = {
                .xi_startino    = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
                .xi_alloccount  = irec->ir_count - irec->ir_freecount,
                .xi_allocmask   = ~irec->ir_free,
                .xi_version     = XFS_INUMBERS_VERSION_V5,
        };
        struct xfs_inumbers_chunk *ic = data;
        int                     error;

        error = ic->formatter(ic->breq, &inogrp);
        if (error && error != -ECANCELED)
                return error;

        ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
                        XFS_INODES_PER_CHUNK;
        return error;
}

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
        struct xfs_ibulk        *breq,
        inumbers_fmt_pf         formatter)
{
        struct xfs_inumbers_chunk ic = {
                .formatter      = formatter,
                .breq           = breq,
        };
        int                     error = 0;

        if (xfs_bulkstat_already_done(breq->mp, breq->startino))
                return 0;

        error = xfs_inobt_walk(breq->mp, NULL, breq->startino, breq->flags,
                        xfs_inumbers_walk, breq->icount, &ic);

        /*
         * We found some inode groups, so clear the error status and return
         * them.  The lastino pointer will point directly at the inode that
         * triggered any error that occurred, so on the next call the error
         * will be triggered again and propagated to userspace as there will be
         * no formatted inode groups in the buffer.
         */
        if (breq->ocount > 0)
                error = 0;

        return error;
}

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
        struct xfs_inogrp               *ig1,
        const struct xfs_inumbers       *ig)
{
        /* memset is needed here because of padding holes in the structure. */
        memset(ig1, 0, sizeof(struct xfs_inogrp));
        ig1->xi_startino = ig->xi_startino;
        ig1->xi_alloccount = ig->xi_alloccount;
        ig1->xi_allocmask = ig->xi_allocmask;
}

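/*
 * Example only: a sketch of a v1 compat formatter built on the helper above,
 * analogous to the fsinumbers handling in xfs_ioctl.c.  The name is
 * hypothetical; breq->ubuffer is assumed to be a userspace array of
 * struct xfs_inogrp.
 */
static int
xfs_example_fsinumbers_fmt(
        struct xfs_ibulk                *breq,
        const struct xfs_inumbers       *igrp)
{
        struct xfs_inogrp       ig1;

        xfs_inumbers_to_inogrp(&ig1, igrp);
        if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
                return -EFAULT;
        return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}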