linux/fs/btrfs/extent_io.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY            (1U << 0)
#define EXTENT_UPTODATE         (1U << 1)
#define EXTENT_LOCKED           (1U << 2)
#define EXTENT_NEW              (1U << 3)
#define EXTENT_DELALLOC         (1U << 4)
#define EXTENT_DEFRAG           (1U << 5)
#define EXTENT_BOUNDARY         (1U << 6)
#define EXTENT_NODATASUM        (1U << 7)
#define EXTENT_CLEAR_META_RESV  (1U << 8)
#define EXTENT_NEED_WAIT        (1U << 9)
#define EXTENT_DAMAGED          (1U << 10)
#define EXTENT_NORESERVE        (1U << 11)
#define EXTENT_QGROUP_RESERVED  (1U << 12)
#define EXTENT_CLEAR_DATA_RESV  (1U << 13)
#define EXTENT_DELALLOC_NEW     (1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
                                 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS          (EXTENT_DO_ACCOUNTING)
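
/*
 * Illustrative sketch (not part of this header): the bits above are
 * combined into masks for the set/clear helpers declared below, e.g.
 * releasing a delalloc range together with its reservations:
 *
 *      clear_extent_bit(tree, start, end,
 *                       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL);
 */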

/*
 * Bits redefined from the set above, used only in the device allocation
 * tree. They must not reuse EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV because those have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED   EXTENT_DEFRAG

/*
 * Flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
        EXTENT_BUFFER_UPTODATE,
        EXTENT_BUFFER_DIRTY,
        EXTENT_BUFFER_CORRUPT,
        /* this got triggered by readahead */
        EXTENT_BUFFER_READAHEAD,
        EXTENT_BUFFER_TREE_REF,
        EXTENT_BUFFER_STALE,
        EXTENT_BUFFER_WRITEBACK,
        /* read IO error */
        EXTENT_BUFFER_READ_ERR,
        EXTENT_BUFFER_UNMAPPED,
        EXTENT_BUFFER_IN_TREE,
        /* write IO error */
        EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK             (1 << 0)
#define PAGE_CLEAR_DIRTY        (1 << 1)
#define PAGE_SET_WRITEBACK      (1 << 2)
#define PAGE_END_WRITEBACK      (1 << 3)
#define PAGE_SET_PRIVATE2       (1 << 4)
#define PAGE_SET_ERROR          (1 << 5)
#define PAGE_LOCK               (1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
        ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
        (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
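
/*
 * Worked example (illustrative only): a bitmap range starting at bit 3 and
 * 13 bits long. BITMAP_FIRST_BYTE_MASK(3) expands to
 * ((0xff << 3) & 0xff) == 0xf8, masking off the low 3 bits of the first
 * byte; BITMAP_LAST_BYTE_MASK(13) expands to (0xff >> (-13 & 7)) ==
 * (0xff >> 3) == 0x1f, keeping only the low 13 % 8 == 5 bits of the last
 * byte; BIT_BYTE(13) == 1 picks the byte the final bit lives in.
 */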

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
                struct bio *bio, u64 bio_offset);

struct extent_io_ops {
        /*
         * The following callbacks must always be defined; the function
         * pointers are called unconditionally.
         */
        blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
                                        int mirror_num, unsigned long bio_flags);
        int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
                                    struct page *page, u64 start, u64 end,
                                    int mirror);
};

enum {
        IO_TREE_FS_INFO_FREED_EXTENTS0,
        IO_TREE_FS_INFO_FREED_EXTENTS1,
        IO_TREE_INODE_IO,
        IO_TREE_INODE_IO_FAILURE,
        IO_TREE_RELOC_BLOCKS,
        IO_TREE_TRANS_DIRTY_PAGES,
        IO_TREE_ROOT_DIRTY_LOG_PAGES,
        IO_TREE_SELFTEST,
};

struct extent_io_tree {
        struct rb_root state;
        struct btrfs_fs_info *fs_info;
        void *private_data;
        u64 dirty_bytes;
        bool track_uptodate;

        /* Who owns this io tree, should be one of IO_TREE_* */
        u8 owner;

        spinlock_t lock;
        const struct extent_io_ops *ops;
};

struct extent_state {
        u64 start;
        u64 end; /* inclusive */
        struct rb_node rb_node;

        /* ADD NEW ELEMENTS AFTER THIS */
        wait_queue_head_t wq;
        refcount_t refs;
        unsigned state;

        struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
        struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
        u64 start;
        unsigned long len;
        unsigned long bflags;
        struct btrfs_fs_info *fs_info;
        spinlock_t refs_lock;
        atomic_t refs;
        atomic_t io_pages;
        int read_mirror;
        struct rcu_head rcu_head;
        pid_t lock_owner;

        int blocking_writers;
        atomic_t blocking_readers;
        bool lock_nested;
        /* >= 0 if eb belongs to a log tree, -1 otherwise */
        short log_index;

        /* protects write locks */
        rwlock_t lock;
        /* readers sleep on write_lock_wq while they wait for the write
         * lock holders to unlock
         */
        wait_queue_head_t write_lock_wq;

        /* writers sleep on read_lock_wq while they wait for readers
         * to unlock
         */
        wait_queue_head_t read_lock_wq;
        struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
        int spinning_writers;
        atomic_t spinning_readers;
        atomic_t read_locks;
        int write_locks;
        struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
        /* How many bytes are set/cleared in this operation */
        unsigned int bytes_changed;

        /* Changed ranges */
        struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
        changeset->bytes_changed = 0;
        ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
        struct extent_changeset *ret;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        extent_changeset_init(ret);
        return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
        if (!changeset)
                return;
        changeset->bytes_changed = 0;
        ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
        if (!changeset)
                return;
        extent_changeset_release(changeset);
        kfree(changeset);
}
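
/*
 * Illustrative lifecycle sketch (for a caller that wants to know which
 * ranges a clear operation actually touched, e.g. qgroup accounting;
 * `tree', `start' and `end' are assumed from context):
 *
 *      struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *      if (!changeset)
 *              return -ENOMEM;
 *      ret = clear_record_extent_bits(tree, start, end,
 *                                     EXTENT_QGROUP_RESERVED, changeset);
 *
 * On success changeset->bytes_changed holds the number of bytes cleared and
 * changeset->range_changed lists the affected ranges; the changeset is then
 * freed with extent_changeset_free(changeset).
 */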

static inline void extent_set_compress_type(unsigned long *bio_flags,
                                            int compress_type)
{
        *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
        return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
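
/*
 * Worked example (illustrative only): with EXTENT_BIO_FLAG_SHIFT == 16,
 * compression type 1 is stored in the high bits and the low flag bits are
 * left untouched:
 *
 *      unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *      extent_set_compress_type(&bio_flags, 1);
 *
 * extent_compress_type(bio_flags) now returns 1 and bio_flags == 0x10001.
 */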

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
                                          struct page *page,
                                          size_t pg_offset,
                                          u64 start, u64 len,
                                          int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
                         struct extent_io_tree *tree, unsigned int owner,
                         void *private_data);
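
/*
 * Illustrative init sketch (assumes `fs_info' from context): a tree is
 * initialized with one of the IO_TREE_* owners above and optional private
 * data, and torn down with extent_io_tree_release():
 *
 *      struct extent_io_tree tree;
 *
 *      extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);
 *      ... set/clear/test bits on byte ranges ...
 *      extent_io_tree_release(&tree);
 */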
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
        return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
                          get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
                     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   unsigned bits, int filled,
                   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     unsigned bits, int wake, int delete,
                     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     unsigned bits, int wake, int delete,
                     struct extent_state **cached, gfp_t mask,
                     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
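
/*
 * Illustrative locking sketch: ranges are byte granular and end inclusive,
 * so a lock/unlock pair over `len' bytes looks like this (`inode' being a
 * struct btrfs_inode * from context):
 *
 *      lock_extent(&inode->io_tree, start, start + len - 1);
 *      ... operate on the locked range ...
 *      unlock_extent(&inode->io_tree, start, start + len - 1);
 */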

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached)
{
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
                u64 start, u64 end, struct extent_state **cached)
{
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
                u64 end, unsigned bits)
{
        int wake = 0;

        if (bits & EXTENT_LOCKED)
                wake = 1;

        return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                           unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   unsigned bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
                           unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
                u64 end, unsigned bits)
{
        return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state)
{
        return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
                u64 end, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
                              NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
                                     u64 end, struct extent_state **cached)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC |
                                EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       unsigned bits, unsigned clear_bits,
                       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
                                      u64 end, unsigned int extra_bits,
                                      struct extent_state **cached_state)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
                              NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
                              NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
                u64 end)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
                        GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
                              cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, unsigned bits,
                          struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
                                 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
                              int mode);
int extent_writepages(struct address_space *mapping,
                      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
                            struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
                     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                                          u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE       0
#define WAIT_COMPLETE   1
#define WAIT_PAGE_LOCK  2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
                             int mirror_num);
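
/*
 * Illustrative read sketch (a simplified version of the btree read path):
 * WAIT_COMPLETE blocks until IO on all pages has finished, so the uptodate
 * bit can be checked immediately afterwards:
 *
 *      ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
 *      if (!ret && !extent_buffer_uptodate(eb))
 *              ret = -EIO;
 */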
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
        return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
               (eb->start >> PAGE_SHIFT);
}
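
/*
 * Worked example (illustrative only, 4K pages): a 16K node at
 * eb->start == 0x10000 gives
 * (round_up(0x14000, 0x1000) >> 12) - (0x10000 >> 12) == 20 - 16 == 4 pages.
 */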

static inline void extent_buffer_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
        return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
                         unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
                        unsigned long start,
                        unsigned long len);
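
/*
 * Illustrative copy sketch (a common pattern in the btrfs code): pull a
 * fixed-size structure out of the buffer at a byte offset, regardless of
 * which underlying pages the range straddles (`offset' from context):
 *
 *      struct btrfs_disk_key disk_key;
 *
 *      read_extent_buffer(eb, &disk_key, offset, sizeof(disk_key));
 */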
int read_extent_buffer_to_user(const struct extent_buffer *eb,
                               void __user *dst, unsigned long start,
                               unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
                const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
                         unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
                             struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
                        unsigned long dst_offset, unsigned long src_offset,
                        unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
                           unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
                           unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
                              unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
                                unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
                              unsigned long offset, unsigned long min_len,
                              char **map, unsigned long *map_start,
                              unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                 u64 delalloc_end, struct page *locked_page,
                                 unsigned bits_to_clear,
                                 unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                      u64 length, u64 logical, struct page *page,
                      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
                     struct extent_io_tree *failure_tree,
                     struct extent_io_tree *io_tree, u64 start,
                     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set uptodate
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate that things have failed.
 */
struct io_failure_record {
        struct page *page;
        u64 start;
        u64 len;
        u64 logical;
        unsigned long bio_flags;
        int this_mirror;
        int failed_mirror;
        int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
                u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
                                struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
                            struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
                                    struct io_failure_record *failrec,
                                    struct page *page, int pg_offset, int icsum,
                                    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
                    struct extent_io_tree *io_tree,
                    struct io_failure_record *rec);
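
/*
 * Illustrative retry sketch (a simplified version of what the read repair
 * path does with the helpers above; error handling trimmed, and
 * `failed_bio_pages', `failed_mirror' etc. come from the failed read):
 *
 *      struct io_failure_record *failrec;
 *      struct bio *repair_bio;
 *
 *      ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 *      if (ret)
 *              return ret;
 *      if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *                                  failed_mirror)) {
 *              free_io_failure(failure_tree, io_tree, failrec);
 *              return -EIO;
 *      }
 *      repair_bio = btrfs_create_repair_bio(inode, failed_bio, failrec,
 *                                           page, pg_offset, icsum,
 *                                           endio_func, NULL);
 *
 * The repair bio is then submitted against failrec->this_mirror.
 */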
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
                             struct page *locked_page, u64 *start,
                             u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
                                               u64 start);

#endif