linux/fs/xfs/xfs_attr_inactive.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"
#include "xfs_defer.h"

/*
 * Look at all the extents for this logical region,
 * invalidate any buffers that are incore/in transactions.
 */
STATIC int
xfs_attr3_leaf_freextent(
        struct xfs_trans        **trans,
        struct xfs_inode        *dp,
        xfs_dablk_t             blkno,
        int                     blkcnt)
{
        struct xfs_bmbt_irec    map;
        struct xfs_buf          *bp;
        xfs_dablk_t             tblkno;
        xfs_daddr_t             dblkno;
        int                     tblkcnt;
        int                     dblkcnt;
        int                     nmap;
        int                     error;

        /*
         * Roll through the "value", invalidating the attribute value's
         * blocks.
         */
        tblkno = blkno;
        tblkcnt = blkcnt;
        while (tblkcnt > 0) {
                /*
                 * Map this stretch of the attribute fork to the disk
                 * blocks backing the remote value.
                 */
                nmap = 1;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
                                       &map, &nmap, XFS_BMAPI_ATTRFORK);
                if (error) {
                        return error;
                }
                ASSERT(nmap == 1);
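                /*
                 * The attribute fork never uses delayed allocation, so a
                 * delalloc mapping here would indicate corruption.
                 */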
                ASSERT(map.br_startblock != DELAYSTARTBLOCK);

                /*
                 * If it's a hole, these are already unmapped
                 * so there's nothing to invalidate.
                 */
                if (map.br_startblock != HOLESTARTBLOCK) {

                        dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
                                                  map.br_startblock);
                        dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
                                                map.br_blockcount);
                        bp = xfs_trans_get_buf(*trans,
                                        dp->i_mount->m_ddev_targp,
                                        dblkno, dblkcnt, 0);
                        if (!bp)
                                return -ENOMEM;
                        xfs_trans_binval(*trans, bp);
                        /*
                         * Roll to the next transaction so each invalidation
                         * stays within its own log reservation.
                         */
                        error = xfs_trans_roll_inode(trans, dp);
                        if (error)
                                return error;
                }

                tblkno += map.br_blockcount;
                tblkcnt -= map.br_blockcount;
        }

        return 0;
}

/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 */
STATIC int
xfs_attr3_leaf_inactive(
        struct xfs_trans        **trans,
        struct xfs_inode        *dp,
        struct xfs_buf          *bp)
{
        struct xfs_attr_leafblock *leaf;
        struct xfs_attr3_icleaf_hdr ichdr;
        struct xfs_attr_leaf_entry *entry;
        struct xfs_attr_leaf_name_remote *name_rmt;
        struct xfs_attr_inactive_list *list;
        struct xfs_attr_inactive_list *lp;
        int                     error;
        int                     count;
        int                     size;
        int                     tmp;
        int                     i;
        struct xfs_mount        *mp = bp->b_target->bt_mount;

        leaf = bp->b_addr;
        xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

        /*
         * Count the number of "remote" value extents.
         */
        count = 0;
        entry = xfs_attr3_leaf_entryp(leaf);
        for (i = 0; i < ichdr.count; entry++, i++) {
                if (be16_to_cpu(entry->nameidx) &&
                    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
                        name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
                        if (name_rmt->valueblk)
                                count++;
                }
        }

        /*
         * If there are no "remote" values, we're done.
         */
        if (count == 0) {
                xfs_trans_brelse(*trans, bp);
                return 0;
        }

        /*
         * Allocate storage for a list of all the "remote" value extents.
         */
        size = count * sizeof(xfs_attr_inactive_list_t);
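        /* KM_SLEEP allocations block until they succeed, so no NULL check. */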
        list = kmem_alloc(size, KM_SLEEP);

        /*
         * Identify each of the "remote" value extents.
         */
        lp = list;
        entry = xfs_attr3_leaf_entryp(leaf);
        for (i = 0; i < ichdr.count; entry++, i++) {
                if (be16_to_cpu(entry->nameidx) &&
                    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
                        name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
                        if (name_rmt->valueblk) {
                                lp->valueblk = be32_to_cpu(name_rmt->valueblk);
                                lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
                                                    be32_to_cpu(name_rmt->valuelen));
                                lp++;
                        }
                }
        }
        xfs_trans_brelse(*trans, bp);   /* unlock for trans. in freextent() */

        /*
         * Invalidate each of the "remote" value extents.
         */
        error = 0;
        for (lp = list, i = 0; i < count; i++, lp++) {
                tmp = xfs_attr3_leaf_freextent(trans, dp,
                                lp->valueblk, lp->valuelen);

                if (error == 0)
                        error = tmp;    /* save only the 1st errno */
        }

        kmem_free(list);
        return error;
}

/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
STATIC int
xfs_attr3_node_inactive(
        struct xfs_trans **trans,
        struct xfs_inode *dp,
        struct xfs_buf  *bp,
        int             level)
{
        xfs_da_blkinfo_t *info;
        xfs_da_intnode_t *node;
        xfs_dablk_t child_fsb;
        xfs_daddr_t parent_blkno, child_blkno;
        int error, i;
        struct xfs_buf *child_bp;
        struct xfs_da_node_entry *btree;
        struct xfs_da3_icnode_hdr ichdr;

        /*
         * Since this code is recursive (gasp!) we must protect ourselves.
         */
        if (level > XFS_DA_NODE_MAXDEPTH) {
                xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
                return -EIO;
        }

        node = bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&ichdr, node);
        parent_blkno = bp->b_bn;
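        /*
         * Remember the parent's disk address; its buffer is released below
         * and re-read to pick up each subsequent child pointer.
         */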
        if (!ichdr.count) {
                xfs_trans_brelse(*trans, bp);
                return 0;
        }
        btree = dp->d_ops->node_tree_p(node);
        child_fsb = be32_to_cpu(btree[0].before);
        xfs_trans_brelse(*trans, bp);   /* no locks for later trans */

        /*
         * If this is the node level just above the leaves, simply loop
         * over the leaves removing all of them.  If this is higher up
         * in the tree, recurse downward.
         */
        for (i = 0; i < ichdr.count; i++) {
                /*
                 * Read the subsidiary block to see what we have to work with.
                 * Don't do this in a transaction.  This is a depth-first
                 * traversal of the tree so we may deal with many blocks
                 * before we come back to this one.
                 */
                error = xfs_da3_node_read(*trans, dp, child_fsb, -1, &child_bp,
                                          XFS_ATTR_FORK);
                if (error)
                        return error;

                /* save for re-read later */
                child_blkno = XFS_BUF_ADDR(child_bp);
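                /*
                 * The recursion below releases child_bp, so the disk address
                 * is needed to re-read and invalidate the buffer afterwards.
                 */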

                /*
                 * Invalidate the subtree, however we have to.
                 */
                info = child_bp->b_addr;
                switch (info->magic) {
                case cpu_to_be16(XFS_DA_NODE_MAGIC):
                case cpu_to_be16(XFS_DA3_NODE_MAGIC):
                        error = xfs_attr3_node_inactive(trans, dp, child_bp,
                                                        level + 1);
                        break;
                case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
                case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
                        error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
                        break;
                default:
                        error = -EIO;
                        xfs_trans_brelse(*trans, child_bp);
                        break;
                }
                if (error)
                        return error;

                /*
                 * Remove the subsidiary block from the cache and from the log.
                 */
                error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp,
                                       XFS_ATTR_FORK);
                if (error)
                        return error;
                xfs_trans_binval(*trans, child_bp);

                /*
                 * If we're not done, re-read the parent to get the next
                 * child block number.
                 */
                if (i + 1 < ichdr.count) {
                        error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
                                                 &bp, XFS_ATTR_FORK);
                        if (error)
                                return error;
                        node = bp->b_addr;
                        btree = dp->d_ops->node_tree_p(node);
                        child_fsb = be32_to_cpu(btree[i + 1].before);
                        xfs_trans_brelse(*trans, bp);
                }
                /*
                 * Commit the invalidation of this child and move to a
                 * fresh transaction before processing the next one.
                 */
                error = xfs_trans_roll_inode(trans, dp);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * Indiscriminately delete the entire attribute fork
 *
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
static int
xfs_attr3_root_inactive(
        struct xfs_trans        **trans,
        struct xfs_inode        *dp)
{
        struct xfs_da_blkinfo   *info;
        struct xfs_buf          *bp;
        xfs_daddr_t             blkno;
        int                     error;

        /*
         * Read block 0 to see what we have to work with.
         * We only get here if we have extents; since we remove the extents
         * in reverse order, the extent containing block 0 must still be
         * there.
         */
        error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
        if (error)
                return error;
        blkno = bp->b_bn;
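        /*
         * Remember the root's disk address so its incore buffer can be
         * re-read and invalidated after the subtree walk releases it.
         */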

        /*
         * Invalidate the tree, even if the "tree" is only a single leaf block.
         * This is a depth-first traversal!
         */
        info = bp->b_addr;
        switch (info->magic) {
        case cpu_to_be16(XFS_DA_NODE_MAGIC):
        case cpu_to_be16(XFS_DA3_NODE_MAGIC):
                error = xfs_attr3_node_inactive(trans, dp, bp, 1);
                break;
        case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
        case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
                error = xfs_attr3_leaf_inactive(trans, dp, bp);
                break;
        default:
                error = -EIO;
                xfs_trans_brelse(*trans, bp);
                break;
        }
        if (error)
                return error;

        /*
         * Invalidate the incore copy of the root block.
         */
        error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
        if (error)
                return error;
        xfs_trans_binval(*trans, bp);   /* remove from cache */
        /*
         * Commit the invalidate and start the next transaction.
         */
        error = xfs_trans_roll_inode(trans, dp);

        return error;
}

/*
 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
 * removes both the on-disk and in-memory inode fork. Note that this also has to
 * handle the condition of inodes without attributes but with an attribute fork
 * configured, so we can't use xfs_inode_hasattr() here.
 *
 * The in-memory attribute fork is removed even on error.
 */
int
xfs_attr_inactive(
        struct xfs_inode        *dp)
{
        struct xfs_trans        *trans;
        struct xfs_mount        *mp;
        int                     lock_mode = XFS_ILOCK_SHARED;
        int                     error = 0;

        mp = dp->i_mount;
        ASSERT(!XFS_NOT_DQATTACHED(mp, dp));

        xfs_ilock(dp, lock_mode);
        if (!XFS_IFORK_Q(dp))
                goto out_destroy_fork;
        xfs_iunlock(dp, lock_mode);

        lock_mode = 0;

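        /*
         * Transaction reservation can block waiting for log space, so the
         * transaction is allocated before retaking the inode lock.
         */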
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
        if (error)
                goto out_destroy_fork;

        lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(dp, lock_mode);

        if (!XFS_IFORK_Q(dp))
                goto out_cancel;

        /*
         * No need to make quota reservations here. We expect to release some
         * blocks, not allocate, in the common case.
         */
        xfs_trans_ijoin(trans, dp, 0);

        /*
         * Invalidate and truncate the attribute fork extents. Make sure the
         * fork actually has attributes as otherwise the invalidation has no
         * blocks to read and returns an error. In this case, just do the fork
         * removal below.
         */
        if (xfs_inode_hasattr(dp) &&
            dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
                error = xfs_attr3_root_inactive(&trans, dp);
                if (error)
                        goto out_cancel;

                error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
                if (error)
                        goto out_cancel;
        }

        /* Reset the attribute fork - this also destroys the in-core fork */
        xfs_attr_fork_remove(dp, trans);

        error = xfs_trans_commit(trans);
        xfs_iunlock(dp, lock_mode);
        return error;

out_cancel:
        xfs_trans_cancel(trans);
out_destroy_fork:
        /* kill the in-core attr fork before we drop the inode lock */
        if (dp->i_afp)
                xfs_idestroy_fork(dp, XFS_ATTR_FORK);
        if (lock_mode)
                xfs_iunlock(dp, lock_mode);
        return error;
}