linux/fs/xfs/scrub/attr.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"
#include "scrub/trace.h"

#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>

/* Set us up to scrub an inode's extended attributes. */
int
xfs_scrub_setup_xattr(
        struct xfs_scrub_context        *sc,
        struct xfs_inode                *ip)
{
        size_t                          sz;

        /*
         * Allocate the buffer without the inode lock held.  We need enough
         * space to read every xattr value in the file or enough space to
         * hold three copies of the xattr free space bitmap.  (Not both at
         * the same time.)
         */
        sz = max_t(size_t, XATTR_SIZE_MAX, 3 * sizeof(long) *
                        BITS_TO_LONGS(sc->mp->m_attr_geo->blksize));
        sc->buf = kmem_zalloc_large(sz, KM_SLEEP);
        if (!sc->buf)
                return -ENOMEM;

        return xfs_scrub_setup_inode_contents(sc, ip, 0);
}

/* Extended Attributes */

struct xfs_scrub_xattr {
        struct xfs_attr_list_context    context;
        struct xfs_scrub_context        *sc;
};

/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
 * to call this function for every attribute key in an inode.  Once
 * we're here, we load the attribute value to see if any errors happen,
 * or if we get more or less data than we expected.
 */
static void
xfs_scrub_xattr_listent(
        struct xfs_attr_list_context    *context,
        int                             flags,
        unsigned char                   *name,
        int                             namelen,
        int                             valuelen)
{
        struct xfs_scrub_xattr          *sx;
        struct xfs_da_args              args = { NULL };
        int                             error = 0;

        sx = container_of(context, struct xfs_scrub_xattr, context);

        if (flags & XFS_ATTR_INCOMPLETE) {
                /* Incomplete attr key, just mark the inode for preening. */
                xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino);
                return;
        }

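        /*
         * Set up a getxattr-style lookup for this key.  ATTR_KERNOTIME
         * keeps the lookup from updating the inode timestamps.
         */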
        args.flags = ATTR_KERNOTIME;
        if (flags & XFS_ATTR_ROOT)
                args.flags |= ATTR_ROOT;
        else if (flags & XFS_ATTR_SECURE)
                args.flags |= ATTR_SECURE;
        args.geo = context->dp->i_mount->m_attr_geo;
        args.whichfork = XFS_ATTR_FORK;
        args.dp = context->dp;
        args.name = name;
        args.namelen = namelen;
        args.hashval = xfs_da_hashname(args.name, args.namelen);
        args.trans = context->tp;
        args.value = sx->sc->buf;
        args.valuelen = XATTR_SIZE_MAX;

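        /*
         * Look up the value while we already hold the ILOCK.  The lower
         * lookup code can report a successful match as -EEXIST, so treat
         * that as success; a value length that differs from what the
         * listing callback reported means the attr metadata is corrupt.
         */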
        error = xfs_attr_get_ilocked(context->dp, &args);
        if (error == -EEXIST)
                error = 0;
        if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
                        &error))
                goto fail_xref;
        if (args.valuelen != valuelen)
                xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
                                             args.blkno);
fail_xref:
        if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                context->seen_enough = 1;
        return;
}

/*
 * Mark a range [start, start+len) in this map.  Returns true if the
 * region was free, and false if there's a conflict or a problem.
 *
 * Within a char, the lowest bit of the char represents the byte with
 * the smallest address.
 */
STATIC bool
xfs_scrub_xattr_set_map(
        struct xfs_scrub_context        *sc,
        unsigned long                   *map,
        unsigned int                    start,
        unsigned int                    len)
{
        unsigned int                    mapsize = sc->mp->m_attr_geo->blksize;
        bool                            ret = true;

        if (start >= mapsize)
                return false;
        if (start + len > mapsize) {
                len = mapsize - start;
                ret = false;
        }

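        /*
         * If any bit in [start, start + len) is already set, the new
         * range overlaps something that was recorded earlier; the range
         * is marked in the map either way.
         */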
        if (find_next_bit(map, mapsize, start) < start + len)
                ret = false;
        bitmap_set(map, start, len);

        return ret;
}

/*
 * Check the leaf freemap against the usage bitmap.  Returns false if the
 * attr freemap has problems or points to used space.
 */
STATIC bool
xfs_scrub_xattr_check_freemap(
        struct xfs_scrub_context        *sc,
        unsigned long                   *map,
        struct xfs_attr3_icleaf_hdr     *leafhdr)
{
        unsigned long                   *freemap;
        unsigned long                   *dstmap;
        unsigned int                    mapsize = sc->mp->m_attr_geo->blksize;
        int                             i;

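        /*
         * sc->buf holds three adjacent bitmaps: the caller's used-space
         * map, the freemap constructed below, and a scratch map for the
         * intersection of the two.
         */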
        /* Construct bitmap of freemap contents. */
        freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
        bitmap_zero(freemap, mapsize);
        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
                if (!xfs_scrub_xattr_set_map(sc, freemap,
                                leafhdr->freemap[i].base,
                                leafhdr->freemap[i].size))
                        return false;
        }

        /* Look for bits that are set in freemap and are marked in use. */
        dstmap = freemap + BITS_TO_LONGS(mapsize);
        return bitmap_and(dstmap, freemap, map, mapsize) == 0;
}

/*
 * Check this leaf entry's relations to everything else.
 * Adds the number of bytes used for the name/value data to *usedbytes.
 */
STATIC void
xfs_scrub_xattr_entry(
        struct xfs_scrub_da_btree       *ds,
        int                             level,
        char                            *buf_end,
        struct xfs_attr_leafblock       *leaf,
        struct xfs_attr3_icleaf_hdr     *leafhdr,
        unsigned long                   *usedmap,
        struct xfs_attr_leaf_entry      *ent,
        int                             idx,
        unsigned int                    *usedbytes,
        __u32                           *last_hashval)
{
        struct xfs_mount                *mp = ds->state->mp;
        char                            *name_end;
        struct xfs_attr_leaf_name_local *lentry;
        struct xfs_attr_leaf_name_remote *rentry;
        unsigned int                    nameidx;
        unsigned int                    namesize;

        if (ent->pad2 != 0)
                xfs_scrub_da_set_corrupt(ds, level);

        /* Hash values in order? */
        if (be32_to_cpu(ent->hashval) < *last_hashval)
                xfs_scrub_da_set_corrupt(ds, level);
        *last_hashval = be32_to_cpu(ent->hashval);

        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < leafhdr->firstused ||
            nameidx >= mp->m_attr_geo->blksize) {
                xfs_scrub_da_set_corrupt(ds, level);
                return;
        }

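        /*
         * Local entries keep the attr value inline in this leaf block;
         * remote entries store only the name here and point at separate
         * value blocks elsewhere in the attr fork.
         */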
        /* Check the name information. */
        if (ent->flags & XFS_ATTR_LOCAL) {
                lentry = xfs_attr3_leaf_name_local(leaf, idx);
                namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
                                be16_to_cpu(lentry->valuelen));
                name_end = (char *)lentry + namesize;
                if (lentry->namelen == 0)
                        xfs_scrub_da_set_corrupt(ds, level);
        } else {
                rentry = xfs_attr3_leaf_name_remote(leaf, idx);
                namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
                name_end = (char *)rentry + namesize;
                if (rentry->namelen == 0 || rentry->valueblk == 0)
                        xfs_scrub_da_set_corrupt(ds, level);
        }
        if (name_end > buf_end)
                xfs_scrub_da_set_corrupt(ds, level);

        if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
                xfs_scrub_da_set_corrupt(ds, level);
        if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                *usedbytes += namesize;
}

/* Scrub an attribute leaf. */
STATIC int
xfs_scrub_xattr_block(
        struct xfs_scrub_da_btree       *ds,
        int                             level)
{
        struct xfs_attr3_icleaf_hdr     leafhdr;
        struct xfs_mount                *mp = ds->state->mp;
        struct xfs_da_state_blk         *blk = &ds->state->path.blk[level];
        struct xfs_buf                  *bp = blk->bp;
        xfs_dablk_t                     *last_checked = ds->private;
        struct xfs_attr_leafblock       *leaf = bp->b_addr;
        struct xfs_attr_leaf_entry      *ent;
        struct xfs_attr_leaf_entry      *entries;
        unsigned long                   *usedmap = ds->sc->buf;
        char                            *buf_end;
        size_t                          off;
        __u32                           last_hashval = 0;
        unsigned int                    usedbytes = 0;
        unsigned int                    hdrsize;
        int                             i;

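        /*
         * The da btree walker calls us once per record, so skip the
         * block-level checks if this leaf block was already examined.
         */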
        if (*last_checked == blk->blkno)
                return 0;
        *last_checked = blk->blkno;
        bitmap_zero(usedmap, mp->m_attr_geo->blksize);

        /* Check all the padding. */
        if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
                struct xfs_attr3_leafblock      *leaf = bp->b_addr;

                if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
                    leaf->hdr.info.hdr.pad != 0)
                        xfs_scrub_da_set_corrupt(ds, level);
        } else {
                if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
                        xfs_scrub_da_set_corrupt(ds, level);
        }

        /* Check the leaf header */
        xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
        hdrsize = xfs_attr3_leaf_hdr_size(leaf);

        if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
                xfs_scrub_da_set_corrupt(ds, level);
        if (leafhdr.firstused > mp->m_attr_geo->blksize)
                xfs_scrub_da_set_corrupt(ds, level);
        if (leafhdr.firstused < hdrsize)
                xfs_scrub_da_set_corrupt(ds, level);
        if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
                xfs_scrub_da_set_corrupt(ds, level);

        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        entries = xfs_attr3_leaf_entryp(leaf);
        if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
                xfs_scrub_da_set_corrupt(ds, level);

        buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
        for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
                /* Mark the leaf entry itself. */
                off = (char *)ent - (char *)leaf;
                if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
                                sizeof(xfs_attr_leaf_entry_t))) {
                        xfs_scrub_da_set_corrupt(ds, level);
                        goto out;
                }

                /* Check the entry and nameval. */
                xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
                                usedmap, ent, i, &usedbytes, &last_hashval);

                if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        goto out;
        }

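        /*
         * The freemap must not overlap anything marked as used above,
         * and the name/value bytes we tallied must match the usage
         * recorded in the leaf header.
         */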
        if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
                xfs_scrub_da_set_corrupt(ds, level);

        if (leafhdr.usedbytes != usedbytes)
                xfs_scrub_da_set_corrupt(ds, level);

out:
        return 0;
}

/* Scrub an attribute btree record. */
STATIC int
xfs_scrub_xattr_rec(
        struct xfs_scrub_da_btree       *ds,
        int                             level,
        void                            *rec)
{
        struct xfs_mount                *mp = ds->state->mp;
        struct xfs_attr_leaf_entry      *ent = rec;
        struct xfs_da_state_blk         *blk;
        struct xfs_attr_leaf_name_local *lentry;
        struct xfs_attr_leaf_name_remote        *rentry;
        struct xfs_buf                  *bp;
        xfs_dahash_t                    calc_hash;
        xfs_dahash_t                    hash;
        int                             nameidx;
        int                             hdrsize;
        unsigned int                    badflags;
        int                             error;

        blk = &ds->state->path.blk[level];

        /* Check the whole block, if necessary. */
        error = xfs_scrub_xattr_block(ds, level);
        if (error)
                goto out;
        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Check the hash of the entry. */
        error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
        if (error)
                goto out;

        /* Find the attr entry's location. */
        bp = blk->bp;
        hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
                xfs_scrub_da_set_corrupt(ds, level);
                goto out;
        }

        /* Retrieve the entry and check it. */
        hash = be32_to_cpu(ent->hashval);
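        /*
         * Flag bits other than the local/root/secure/incomplete flags
         * we know about indicate a corrupt entry.
         */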
        badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
                        XFS_ATTR_INCOMPLETE);
        if ((ent->flags & badflags) != 0)
                xfs_scrub_da_set_corrupt(ds, level);
        if (ent->flags & XFS_ATTR_LOCAL) {
                lentry = (struct xfs_attr_leaf_name_local *)
                                (((char *)bp->b_addr) + nameidx);
                if (lentry->namelen <= 0) {
                        xfs_scrub_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
        } else {
                rentry = (struct xfs_attr_leaf_name_remote *)
                                (((char *)bp->b_addr) + nameidx);
                if (rentry->namelen <= 0) {
                        xfs_scrub_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
        }
        if (calc_hash != hash)
                xfs_scrub_da_set_corrupt(ds, level);

out:
        return error;
}

/* Scrub the extended attribute metadata. */
int
xfs_scrub_xattr(
        struct xfs_scrub_context        *sc)
{
        struct xfs_scrub_xattr          sx;
        struct attrlist_cursor_kern     cursor = { 0 };
        xfs_dablk_t                     last_checked = -1U;
        int                             error = 0;

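        /*
         * The scrub runs in two passes: walk the attr fork's da btree to
         * check each block and record, then list every attr name and
         * confirm that each key can be looked up by hash.
         */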
        if (!xfs_inode_hasattr(sc->ip))
                return -ENOENT;

        memset(&sx, 0, sizeof(sx));
        /* Check attribute tree structure */
        error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
                        &last_checked);
        if (error)
                goto out;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Check that every attr key can also be looked up by hash. */
        sx.context.dp = sc->ip;
        sx.context.cursor = &cursor;
        sx.context.resynch = 1;
        sx.context.put_listent = xfs_scrub_xattr_listent;
        sx.context.tp = sc->tp;
        sx.context.flags = ATTR_INCOMPLETE;
        sx.sc = sc;

        /*
         * Look up every xattr in this file by name.
         *
         * Use the backend implementation of xfs_attr_list to call
         * xfs_scrub_xattr_listent on every attribute key in this inode.
         * In other words, we use the same iterator/callback mechanism
         * that listattr uses to scrub extended attributes, though in our
         * _listent function, we check the value of the attribute.
         *
         * The VFS only locks i_rwsem when modifying attrs, so keep all
         * three locks held because that's the only way to ensure we're
         * the only thread poking into the da btree.  We traverse the da
         * btree while holding a leaf buffer locked for the xattr name
         * iteration, which doesn't really follow the usual buffer
         * locking order.
         */
        error = xfs_attr_list_int_ilocked(&sx.context);
        if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
                goto out;
out:
        return error;
}