linux/fs/xfs/libxfs/xfs_bmap_btree.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_rmap.h"

/*
 * Determine the extent state.
 */
/* ARGSUSED */
STATIC xfs_exntst_t
xfs_extent_state(
        xfs_filblks_t           blks,
        int                     extent_flag)
{
        if (extent_flag) {
                ASSERT(blks != 0);      /* saved for DMIG */
                return XFS_EXT_UNWRITTEN;
        }
        return XFS_EXT_NORM;
}

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
        struct xfs_inode        *ip,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen,
        struct xfs_btree_block  *rblock,
        int                     rblocklen)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
                                 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
                                 XFS_BTREE_LONG_PTRS);
        rblock->bb_level = dblock->bb_level;
        ASSERT(be16_to_cpu(rblock->bb_level) > 0);
        rblock->bb_numrecs = dblock->bb_numrecs;
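        /*
         * dmxr is used twice: first as the on-disk maxrecs, which fixes
         * where the pointer array starts inside the dinode root, and then
         * (reassigned below) as the number of records actually copied.
         */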
        dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
        fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 */
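/*
 * The packed layout used by the get/set helpers below is (l0 and l1 being
 * the two 64-bit halves of the record; the _disk_ variants apply the same
 * layout to the big-endian on-disk words):
 *
 *      l0 bit  63      extent flag (1 = unwritten, 0 = normal)
 *      l0 bits 62..9   startoff (54 bits)
 *      l0 bits  8..0   high 9 bits of startblock
 *      l1 bits 63..21  low 43 bits of startblock (52 bits in total)
 *      l1 bits 20..0   blockcount (21 bits)
 */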
STATIC void
__xfs_bmbt_get_all(
                __uint64_t l0,
                __uint64_t l1,
                xfs_bmbt_irec_t *s)
{
        int     ext_flag;
        xfs_exntst_t st;

        ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
        s->br_startoff = ((xfs_fileoff_t)l0 &
                           xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
        s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
                           (((xfs_fsblock_t)l1) >> 21);
        s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
        /* This is xfs_extent_state() in-line */
        if (ext_flag) {
                ASSERT(s->br_blockcount != 0);  /* saved for DMIG */
                st = XFS_EXT_UNWRITTEN;
        } else
                st = XFS_EXT_NORM;
        s->br_state = st;
}

void
xfs_bmbt_get_all(
        xfs_bmbt_rec_host_t *r,
        xfs_bmbt_irec_t *s)
{
        __xfs_bmbt_get_all(r->l0, r->l1, s);
}

/*
 * Extract the blockcount field from an in memory bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_get_blockcount(
        xfs_bmbt_rec_host_t     *r)
{
        return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}

/*
 * Extract the startblock field from an in memory bmap extent record.
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
        xfs_bmbt_rec_host_t     *r)
{
        return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
               (((xfs_fsblock_t)r->l1) >> 21);
}

/*
 * Extract the startoff field from an in memory bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_get_startoff(
        xfs_bmbt_rec_host_t     *r)
{
        return ((xfs_fileoff_t)r->l0 &
                 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

xfs_exntst_t
xfs_bmbt_get_state(
        xfs_bmbt_rec_host_t     *r)
{
        int     ext_flag;

        ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
        return xfs_extent_state(xfs_bmbt_get_blockcount(r),
                                ext_flag);
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
        xfs_bmbt_rec_t  *r)
{
        return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
        xfs_bmbt_rec_t  *r)
{
        return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
                 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}


/*
 * Set all the fields in a bmap extent record from the arguments.
 */
void
xfs_bmbt_set_allf(
        xfs_bmbt_rec_host_t     *r,
        xfs_fileoff_t           startoff,
        xfs_fsblock_t           startblock,
        xfs_filblks_t           blockcount,
        xfs_exntst_t            state)
{
        int             extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

        ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
        ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
        ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);

        ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

        r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                ((xfs_bmbt_rec_base_t)startoff << 9) |
                ((xfs_bmbt_rec_base_t)startblock >> 43);
        r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
                ((xfs_bmbt_rec_base_t)blockcount &
                (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_set_all(
        xfs_bmbt_rec_host_t *r,
        xfs_bmbt_irec_t *s)
{
        xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
                             s->br_blockcount, s->br_state);
}


/*
 * Set all the fields in a disk format bmap extent record from the arguments.
 */
void
xfs_bmbt_disk_set_allf(
        xfs_bmbt_rec_t          *r,
        xfs_fileoff_t           startoff,
        xfs_fsblock_t           startblock,
        xfs_filblks_t           blockcount,
        xfs_exntst_t            state)
{
        int                     extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

        ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
        ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
        ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
        ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

        r->l0 = cpu_to_be64(
                ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                 ((xfs_bmbt_rec_base_t)startoff << 9) |
                 ((xfs_bmbt_rec_base_t)startblock >> 43));
        r->l1 = cpu_to_be64(
                ((xfs_bmbt_rec_base_t)startblock << 21) |
                 ((xfs_bmbt_rec_base_t)blockcount &
                  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
}

/*
 * Set all the fields in a disk format bmap extent record from the
 * uncompressed form.
 */
STATIC void
xfs_bmbt_disk_set_all(
        xfs_bmbt_rec_t  *r,
        xfs_bmbt_irec_t *s)
{
        xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
                                  s->br_blockcount, s->br_state);
}

/*
 * Set the blockcount field in a bmap extent record.
 */
void
xfs_bmbt_set_blockcount(
        xfs_bmbt_rec_host_t *r,
        xfs_filblks_t   v)
{
        ASSERT((v & xfs_mask64hi(43)) == 0);
        r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
                  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
}

/*
 * Set the startblock field in a bmap extent record.
 */
void
xfs_bmbt_set_startblock(
        xfs_bmbt_rec_host_t *r,
        xfs_fsblock_t   v)
{
        ASSERT((v & xfs_mask64hi(12)) == 0);
        r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
                  (xfs_bmbt_rec_base_t)(v >> 43);
        r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
                  (xfs_bmbt_rec_base_t)(v << 21);
}

/*
 * Set the startoff field in a bmap extent record.
 */
void
xfs_bmbt_set_startoff(
        xfs_bmbt_rec_host_t *r,
        xfs_fileoff_t   v)
{
        ASSERT((v & xfs_mask64hi(9)) == 0);
        r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
                ((xfs_bmbt_rec_base_t)v << 9) |
                  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
}

/*
 * Set the extent state field in a bmap extent record.
 */
void
xfs_bmbt_set_state(
        xfs_bmbt_rec_host_t *r,
        xfs_exntst_t    v)
{
        ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
        if (v == XFS_EXT_NORM)
                r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
        else
                r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *rblock,
        int                     rblocklen,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen)
{
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
                ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
                       &mp->m_sb.sb_meta_uuid));
                ASSERT(rblock->bb_u.l.bb_blkno ==
                       cpu_to_be64(XFS_BUF_DADDR_NULL));
        } else
                ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
        ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
        ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
        ASSERT(rblock->bb_level != 0);
        dblock->bb_level = rblock->bb_level;
        dblock->bb_numrecs = rblock->bb_numrecs;
        dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
        fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Check extent records, which have just been read, for
 * any bit in the extent flag field. ASSERT on debug
 * kernels, as this condition should not occur.
 * Return an error condition (1) if any flags found,
 * otherwise return 0.
 */

int
xfs_check_nostate_extents(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        xfs_extnum_t            num)
{
        for (; num > 0; num--, idx++) {
                xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
                if ((ep->l0 >>
                     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
                        ASSERT(0);
                        return 1;
                }
        }
        return 0;
}


STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        struct xfs_btree_cur    *new;

        new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.b.ip, cur->bc_private.b.whichfork);

        /*
         * Copy the firstblock, dfops, and flags values,
         * since init cursor doesn't get them.
         */
        new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
        new->bc_private.b.dfops = cur->bc_private.b.dfops;
        new->bc_private.b.flags = cur->bc_private.b.flags;

        return new;
}

STATIC void
xfs_bmbt_update_cursor(
        struct xfs_btree_cur    *src,
        struct xfs_btree_cur    *dst)
{
        ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
               (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
        ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);

        dst->bc_private.b.allocated += src->bc_private.b.allocated;
        dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

        src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.fsbno = cur->bc_private.b.firstblock;
        args.firstblock = args.fsbno;
        xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
                        cur->bc_private.b.whichfork);

        if (args.fsbno == NULLFSBLOCK) {
                args.fsbno = be64_to_cpu(start->l);
                args.type = XFS_ALLOCTYPE_START_BNO;
try_another_ag:
                /*
                 * Make sure there is sufficient room left in the AG to
                 * complete a full tree split for an extent insert.  If
                 * we are converting the middle part of an extent then
                 * we may need space for two tree splits.
                 *
                 * We are relying on the caller to make the correct block
                 * reservation for this operation to succeed.  If the
                 * reservation amount is insufficient then we may fail a
                 * block allocation here and corrupt the filesystem.
                 */
                args.minleft = args.tp->t_blk_res;
        } else if (cur->bc_private.b.dfops->dop_low) {
                args.type = XFS_ALLOCTYPE_START_BNO;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
        }

        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
        if (!args.wasdel && args.tp->t_blk_res == 0) {
                error = -ENOSPC;
                goto error0;
        }
        error = xfs_alloc_vextent(&args);
        if (error)
                goto error0;

        /*
         * During a CoW operation, the allocation and bmbt updates occur in
         * different transactions.  The mapping code tries to put new bmbt
         * blocks near extents being mapped, but the only way to guarantee this
         * is if the alloc and the mapping happen in a single transaction that
         * has a block reservation.  That isn't the case here, so if we run out
         * of space we'll try again with another AG.
         */
        if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
            args.fsbno == NULLFSBLOCK &&
            args.type == XFS_ALLOCTYPE_NEAR_BNO) {
                args.fsbno = cur->bc_private.b.firstblock;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                goto try_another_ag;
        }

        if (args.fsbno == NULLFSBLOCK && args.minleft) {
                /*
                 * Could not find an AG with enough free space to satisfy
                 * a full btree split.  Try again and if
                 * successful activate the lowspace algorithm.
                 */
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                error = xfs_alloc_vextent(&args);
                if (error)
                        goto error0;
                cur->bc_private.b.dfops->dop_low = true;
        }
        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
                *stat = 0;
                return 0;
        }
        ASSERT(args.len == 1);
        cur->bc_private.b.firstblock = args.fsbno;
        cur->bc_private.b.allocated++;
        cur->bc_private.b.ip->i_d.di_nblocks++;
        xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
                        XFS_TRANS_DQ_BCOUNT, 1L);

        new->l = cpu_to_be64(args.fsbno);

        XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
        *stat = 1;
        return 0;

 error0:
        XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
        return error;
}

STATIC int
xfs_bmbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = cur->bc_mp;
        struct xfs_inode        *ip = cur->bc_private.b.ip;
        struct xfs_trans        *tp = cur->bc_tp;
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
        struct xfs_owner_info   oinfo;

        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
        xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
        ip->i_d.di_nblocks--;

        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        return 0;
}

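/*
 * Minimum records per block.  For levels below the root this is the
 * precomputed per-mount minimum; the root lives in the inode fork, so its
 * minimum is half of whatever the current incore root buffer can hold.
 */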
STATIC int
xfs_bmbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0) / 2;
        }

        return cur->bc_mp->m_bmap_dmnr[level != 0];
}

int
xfs_bmbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0);
        }

        return cur->bc_mp->m_bmap_dmxr[level != 0];

}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level != cur->bc_nlevels - 1)
                return cur->bc_mp->m_bmap_dmxr[level != 0];
        return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->bmbt.br_startoff =
                cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        ptr->l = 0;
}

STATIC __int64_t
xfs_bmbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
                                      cur->bc_rec.b.br_startoff;
}

static bool
xfs_bmbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        unsigned int            level;

        switch (block->bb_magic) {
        case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
                if (!xfs_sb_version_hascrc(&mp->m_sb))
                        return false;
                if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
                        return false;
                if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
                        return false;
                /*
                 * XXX: need a better way of verifying the owner here. Right now
                 * just make sure there has been one set.
                 */
                if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
                        return false;
                /* fall through */
        case cpu_to_be32(XFS_BMAP_MAGIC):
                break;
        default:
                return false;
        }

        /*
         * numrecs and level verification.
         *
         * We don't know what fork we belong to, so just verify that the level
         * is no greater than the larger of the two forks' maximums. Later
         * checks will be more precise.
         */
        level = be16_to_cpu(block->bb_level);
        if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
                return false;
        if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
                return false;

        /* sibling pointer verification */
        if (!block->bb_u.l.bb_leftsib ||
            (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
             !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
                return false;
        if (!block->bb_u.l.bb_rightsib ||
            (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
             !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
                return false;

        return true;
}

static void
xfs_bmbt_read_verify(
        struct xfs_buf  *bp)
{
        if (!xfs_btree_lblock_verify_crc(bp))
                xfs_buf_ioerror(bp, -EFSBADCRC);
        else if (!xfs_bmbt_verify(bp))
                xfs_buf_ioerror(bp, -EFSCORRUPTED);

        if (bp->b_error) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp);
        }
}

static void
xfs_bmbt_write_verify(
        struct xfs_buf  *bp)
{
        if (!xfs_bmbt_verify(bp)) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_buf_ioerror(bp, -EFSCORRUPTED);
                xfs_verifier_error(bp);
                return;
        }
        xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
        .name = "xfs_bmbt",
        .verify_read = xfs_bmbt_read_verify,
        .verify_write = xfs_bmbt_write_verify,
};


#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_bmbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be64_to_cpu(k1->bmbt.br_startoff) <
                be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
                xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
                xfs_bmbt_disk_get_startoff(&r2->bmbt);
}
#endif  /* DEBUG */

static const struct xfs_btree_ops xfs_bmbt_ops = {
        .rec_len                = sizeof(xfs_bmbt_rec_t),
        .key_len                = sizeof(xfs_bmbt_key_t),

        .dup_cursor             = xfs_bmbt_dup_cursor,
        .update_cursor          = xfs_bmbt_update_cursor,
        .alloc_block            = xfs_bmbt_alloc_block,
        .free_block             = xfs_bmbt_free_block,
        .get_maxrecs            = xfs_bmbt_get_maxrecs,
        .get_minrecs            = xfs_bmbt_get_minrecs,
        .get_dmaxrecs           = xfs_bmbt_get_dmaxrecs,
        .init_key_from_rec      = xfs_bmbt_init_key_from_rec,
        .init_rec_from_cur      = xfs_bmbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_bmbt_init_ptr_from_cur,
        .key_diff               = xfs_bmbt_key_diff,
        .buf_ops                = &xfs_bmbt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
        .keys_inorder           = xfs_bmbt_keys_inorder,
        .recs_inorder           = xfs_bmbt_recs_inorder,
#endif
};

/*
 * Allocate a new bmap btree cursor.
 */
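/*
 * A sketch of a typical caller (illustrative only, not lifted from any one
 * call site; "firstblock" and "dfops" stand for the caller's own copies):
 *
 *      cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 *      cur->bc_private.b.firstblock = *firstblock;
 *      cur->bc_private.b.dfops = dfops;
 *      ... generic xfs_btree_* operations on cur ...
 *      xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 *
 * firstblock and dfops are left at NULLFSBLOCK/NULL here and must be filled
 * in by the caller (see also xfs_bmbt_dup_cursor() above).
 */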
struct xfs_btree_cur *                          /* new bmap btree cursor */
xfs_bmbt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* inode owning the btree */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_btree_cur    *cur;
        ASSERT(whichfork != XFS_COW_FORK);

        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_btnum = XFS_BTNUM_BMAP;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

        cur->bc_ops = &xfs_bmbt_ops;
        cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
        if (xfs_sb_version_hascrc(&mp->m_sb))
                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
        cur->bc_private.b.ip = ip;
        cur->bc_private.b.firstblock = NULLFSBLOCK;
        cur->bc_private.b.dfops = NULL;
        cur->bc_private.b.allocated = 0;
        cur->bc_private.b.flags = 0;
        cur->bc_private.b.whichfork = whichfork;

        return cur;
}

/*
 * Calculate number of records in a bmap btree block.
 */
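/*
 * A leaf record (xfs_bmbt_rec_t) is two 64-bit words and an interior entry
 * is an 8-byte key plus an 8-byte pointer, so both cases come to 16 bytes
 * per entry once the block header is subtracted; e.g. assuming a 4096-byte
 * block, a leaf holds (4096 - XFS_BMBT_BLOCK_LEN(mp)) / 16 records.
 */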
int
xfs_bmbt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_BMBT_BLOCK_LEN(mp);

        if (leaf)
                return blocklen / sizeof(xfs_bmbt_rec_t);
        return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
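/*
 * Note that blocklen here is the space available for the root block in the
 * inode fork (see xfs_bmbt_get_dmaxrecs() above), not a full filesystem
 * block.
 */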
int
xfs_bmdr_maxrecs(
        int                     blocklen,
        int                     leaf)
{
        blocklen -= sizeof(xfs_bmdr_block_t);

        if (leaf)
                return blocklen / sizeof(xfs_bmdr_rec_t);
        return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in to the
 * new owner that is passed in, so that we can change owners before or after
 * we switch forks between inodes. The operation that the caller is doing
 * will determine whether it needs to change the owner before or after the
 * switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_ino_t               new_owner,
        struct list_head        *buffer_list)
{
        struct xfs_btree_cur    *cur;
        int                     error;

        ASSERT(tp || buffer_list);
        ASSERT(!(tp && buffer_list));
        if (whichfork == XFS_DATA_FORK)
                ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
        else
                ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

        cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
        if (!cur)
                return -ENOMEM;

        error = xfs_btree_change_owner(cur, new_owner, buffer_list);
        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
        return error;
}
