linux/fs/btrfs/volumes.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE       (10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN        SZ_64K

struct buffer_head;
struct btrfs_pending_bios {
        struct bio *head;
        struct bio *tail;
};

struct btrfs_io_geometry {
        /* remaining bytes before crossing a stripe */
        u64 len;
        /* offset of logical address in chunk */
        u64 offset;
        /* length of single IO stripe */
        u64 stripe_len;
        /* number of the stripe the address falls in */
        u64 stripe_nr;
        /* offset of the address within its stripe */
        u64 stripe_offset;
        /* offset of the raid56 stripe into the chunk */
        u64 raid56_stripe_offset;
};
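
/*
 * Illustrative sketch, not part of the original header: how the fields of
 * struct btrfs_io_geometry relate for a plain striped profile.  The helper
 * name is hypothetical and div64_u64() comes from <linux/math64.h>.
 * @offset is the logical address relative to the chunk start.
 */
static inline void btrfs_example_fill_geometry(struct btrfs_io_geometry *geom,
                                               u64 offset)
{
        geom->offset = offset;
        geom->stripe_len = BTRFS_STRIPE_LEN;
        /* which stripe of the chunk the address lands in */
        geom->stripe_nr = div64_u64(offset, geom->stripe_len);
        /* how far into that stripe the address is */
        geom->stripe_offset = offset - geom->stripe_nr * geom->stripe_len;
        /* bytes remaining before the IO crosses into the next stripe */
        geom->len = geom->stripe_len - geom->stripe_offset;
}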

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)  \
        seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE       (0)
#define BTRFS_DEV_STATE_IN_FS_METADATA  (1)
#define BTRFS_DEV_STATE_MISSING         (2)
#define BTRFS_DEV_STATE_REPLACE_TGT     (3)
#define BTRFS_DEV_STATE_FLUSH_SENT      (4)

struct btrfs_device {
        struct list_head dev_list; /* device_list_mutex */
        struct list_head dev_alloc_list; /* chunk mutex */
        struct list_head post_commit_list; /* chunk mutex */
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_fs_info *fs_info;

        struct rcu_string *name;

        u64 generation;

        spinlock_t io_lock ____cacheline_aligned;
        int running_pending;
        /* regular prio bios */
        struct btrfs_pending_bios pending_bios;
        /* sync bios */
        struct btrfs_pending_bios pending_sync_bios;

        struct block_device *bdev;

        /* the mode sent to blkdev_get */
        fmode_t mode;

        unsigned long dev_state;
        blk_status_t last_flush_error;
        int flush_bio_sent;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
        seqcount_t data_seqcount;
#endif

        /* the internal btrfs device id */
        u64 devid;

        /* size of the device in memory */
        u64 total_bytes;

        /* size of the device on disk */
        u64 disk_total_bytes;

        /* bytes used */
        u64 bytes_used;

        /* optimal io alignment for this device */
        u32 io_align;

        /* optimal io width for this device */
        u32 io_width;
        /* type and info about this device */
        u64 type;

        /* minimal io size for this device */
        u32 sector_size;

        /* physical drive uuid (or lvm uuid) */
        u8 uuid[BTRFS_UUID_SIZE];

        /*
         * size of the device in the current transaction
         *
         * This variant is updated when committing the transaction,
         * and is protected by the chunk mutex
         */
        u64 commit_total_bytes;

        /* bytes used in the current transaction */
        u64 commit_bytes_used;

        /* for sending down flush barriers */
        struct bio *flush_bio;
        struct completion flush_wait;

        /* per-device scrub information */
        struct scrub_ctx *scrub_ctx;

        struct btrfs_work work;

        /* readahead state */
        atomic_t reada_in_flight;
        u64 reada_next;
        struct reada_zone *reada_curr_zone;
        struct radix_tree_root reada_zones;
        struct radix_tree_root reada_extents;

        /*
         * disk I/O failure stats. For detailed description refer to
         * enum btrfs_dev_stat_values in ioctl.h
         */
        int dev_stats_valid;

        /* Counter to record the change of device stats */
        atomic_t dev_stats_ccnt;
        atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

        struct extent_io_tree alloc_state;
};

/*
 * If we read those variants while holding their own lock, we needn't
 * use the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)                                 \
static inline u64                                                       \
btrfs_device_get_##name(const struct btrfs_device *dev)                 \
{                                                                       \
        u64 size;                                                       \
        unsigned int seq;                                               \
                                                                        \
        do {                                                            \
                seq = read_seqcount_begin(&dev->data_seqcount);         \
                size = dev->name;                                       \
        } while (read_seqcount_retry(&dev->data_seqcount, seq));        \
        return size;                                                    \
}                                                                       \
                                                                        \
static inline void                                                      \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)             \
{                                                                       \
        preempt_disable();                                              \
        write_seqcount_begin(&dev->data_seqcount);                      \
        dev->name = size;                                               \
        write_seqcount_end(&dev->data_seqcount);                        \
        preempt_enable();                                               \
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#define BTRFS_DEVICE_GETSET_FUNCS(name)                                 \
static inline u64                                                       \
btrfs_device_get_##name(const struct btrfs_device *dev)                 \
{                                                                       \
        u64 size;                                                       \
                                                                        \
        preempt_disable();                                              \
        size = dev->name;                                               \
        preempt_enable();                                               \
        return size;                                                    \
}                                                                       \
                                                                        \
static inline void                                                      \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)             \
{                                                                       \
        preempt_disable();                                              \
        dev->name = size;                                               \
        preempt_enable();                                               \
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)                                 \
static inline u64                                                       \
btrfs_device_get_##name(const struct btrfs_device *dev)                 \
{                                                                       \
        return dev->name;                                               \
}                                                                       \
                                                                        \
static inline void                                                      \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)             \
{                                                                       \
        dev->name = size;                                               \
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);

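/*
 * Usage sketch, not part of the original header: the invocations above
 * generate btrfs_device_get_total_bytes()/btrfs_device_set_total_bytes()
 * and friends, giving torn-read-safe u64 access on 32-bit SMP without a
 * lock.  The helper name below is hypothetical; note the read-modify-write
 * as a whole is still not atomic, so callers would serialize it externally
 * (e.g. under the chunk mutex).
 */
static inline void btrfs_example_grow_device(struct btrfs_device *dev,
                                             u64 delta)
{
        u64 cur = btrfs_device_get_total_bytes(dev);

        btrfs_device_set_total_bytes(dev, cur + delta);
}
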
struct btrfs_fs_devices {
        u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
        u8 metadata_uuid[BTRFS_FSID_SIZE];
        bool fsid_change;
        struct list_head fs_list;

        u64 num_devices;
        u64 open_devices;
        u64 rw_devices;
        u64 missing_devices;
        u64 total_rw_bytes;
        u64 total_devices;

        /* Highest generation number of seen devices */
        u64 latest_generation;

        struct block_device *latest_bdev;

        /*
         * All of the devices in the FS, protected by a mutex so we can
         * safely walk it to write out the supers without worrying about
         * add/remove by the multi-device code.  Scrubbing the superblock
         * can kick off superblock writes while holding this mutex.
         */
        struct mutex device_list_mutex;

        /* List of all devices, protected by device_list_mutex */
        struct list_head devices;

        /*
         * Devices which can satisfy space allocation. Protected by
         * chunk_mutex
         */
        struct list_head alloc_list;

        struct btrfs_fs_devices *seed;
        int seeding;

        int opened;

        /*
         * Set when we find or add a device that doesn't have the
         * nonrot flag set
         */
        int rotating;

        struct btrfs_fs_info *fs_info;
        /* sysfs kobjects */
        struct kobject fsid_kobj;
        struct kobject *device_dir_kobj;
        struct completion kobj_unregister;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE      64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)        \
                        - sizeof(struct btrfs_chunk))           \
                        / sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE        \
                                - 2 * sizeof(struct btrfs_disk_key)     \
                                - 2 * sizeof(struct btrfs_chunk))       \
                                / sizeof(struct btrfs_stripe) + 1)

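/*
 * Worked example, illustrative only: the on-disk structs are packed, with
 * sizeof(struct btrfs_disk_key) == 17, sizeof(struct btrfs_chunk) == 80
 * (it embeds the first stripe, which is what the "+ 1" above accounts for)
 * and sizeof(struct btrfs_stripe) == 32.  With a 2048 byte system chunk
 * array this gives (2048 - 2 * 17 - 2 * 80) / 32 + 1 = 58 stripes.
 */
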
/*
 * We need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
 * Really, what we need is a btrfs_bio structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet.  We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios.  We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
struct btrfs_io_bio {
        unsigned int mirror_num;
        unsigned int stripe_index;
        u64 logical;
        u8 *csum;
        u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
        struct bvec_iter iter;
        /*
         * This member must come last; bio_alloc_bioset will allocate
         * enough bytes for the entire btrfs_io_bio but relies on bio
         * being last.
         */
        struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
        return container_of(bio, struct btrfs_io_bio, bio);
}

static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
{
        if (io_bio->csum != io_bio->csum_inline) {
                kfree(io_bio->csum);
                io_bio->csum = NULL;
        }
}

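/*
 * Usage sketch, not part of the original header: because all bios btrfs
 * submits come from its own bioset, an end_io handler can recover the
 * enclosing btrfs_io_bio from the plain struct bio it is handed via the
 * container_of() wrapper above.  The handler name below is hypothetical.
 */
static inline void btrfs_example_end_io(struct bio *bio)
{
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);

        /* mirror_num records which copy this IO was sent to */
        if (bio->bi_status && io_bio->mirror_num)
                pr_debug("btrfs: IO error on mirror %u\n",
                         io_bio->mirror_num);

        btrfs_io_bio_free_csum(io_bio);
        bio_put(bio);
}
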
struct btrfs_bio_stripe {
        struct btrfs_device *dev;
        u64 physical;
        u64 length; /* only used for discard mappings */
};

struct btrfs_bio {
        refcount_t refs;
        atomic_t stripes_pending;
        struct btrfs_fs_info *fs_info;
        u64 map_type; /* get from map_lookup->type */
        bio_end_io_t *end_io;
        struct bio *orig_bio;
        unsigned long flags;
        void *private;
        atomic_t error;
        int max_errors;
        int num_stripes;
        int mirror_num;
        int num_tgtdevs;
        int *tgtdev_map;
        /*
         * Logical block numbers for the start of each stripe.
         * The last one or two are p/q.  These are sorted,
         * so raid_map[0] is the start of our full stripe.
         */
        u64 *raid_map;
        struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
        struct btrfs_device *dev;
        u64 dev_offset;
        u64 max_avail;
        u64 total_avail;
};

struct btrfs_raid_attr {
        u8 sub_stripes;         /* sub_stripes info for map */
        u8 dev_stripes;         /* stripes per dev */
        u8 devs_max;            /* max devs to use */
        u8 devs_min;            /* min devs needed */
        u8 tolerated_failures;  /* max tolerated fail devs */
        u8 devs_increment;      /* ndevs has to be a multiple of this */
        u8 ncopies;             /* how many copies of the data there are */
        u8 nparity;             /* number of stripes worth of bytes to store
                                 * parity information */
        u8 mindev_error;        /* error code if min devs requisite is unmet */
        const char raid_name[8]; /* name of the raid */
        u64 bg_flag;            /* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
        u64 type;
        int io_align;
        int io_width;
        u64 stripe_len;
        int num_stripes;
        int sub_stripes;
        int verified_stripes; /* For mount time dev extent verification */
        struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))

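/*
 * Allocation sketch, not part of the original header: map_lookup embeds a
 * flexible array of stripes, so callers size the allocation with
 * map_lookup_size().  The helper name is hypothetical and kzalloc() comes
 * from <linux/slab.h>.
 */
static inline struct map_lookup *btrfs_example_alloc_map(int num_stripes)
{
        struct map_lookup *map;

        map = kzalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (map)
                map->num_stripes = num_stripes;
        return map;
}
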
struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
        struct btrfs_balance_args data;
        struct btrfs_balance_args meta;
        struct btrfs_balance_args sys;

        u64 flags;

        struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
        BTRFS_MAP_READ,
        BTRFS_MAP_WRITE,
        BTRFS_MAP_DISCARD,
        BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
                return BTRFS_MAP_DISCARD;
        case REQ_OP_WRITE:
                return BTRFS_MAP_WRITE;
        default:
                WARN_ON_ONCE(1);
                /* fall through */
        case REQ_OP_READ:
                return BTRFS_MAP_READ;
        }
}

void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                    u64 logical, u64 *length,
                    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                u64 logical, u64 len, struct btrfs_io_geometry *io_geom);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
                     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                           int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
                                           fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
                                     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
                                                  u64 devid,
                                                  const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
                                        const u64 *devid,
                                        const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
                                       u64 devid, u8 *uuid, u8 *fsid, bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
                  struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes,
                               u64 search_start, u64 *start, u64 *max_avail);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
                        struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
                           u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
                                    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
                             u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
                                       u64 logical, u64 length);

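/*
 * Usage sketch, not part of the original header: a typical caller maps a
 * logical range to physical stripes with btrfs_map_block() and walks
 * bbio->stripes.  The helper name is hypothetical and error handling is
 * abbreviated.
 */
static inline int btrfs_example_map(struct btrfs_fs_info *fs_info,
                                    u64 logical, u64 len)
{
        struct btrfs_bio *bbio = NULL;
        int ret, i;

        /* on return, len is trimmed to what a single mapping can cover */
        ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &len,
                              &bbio, 0);
        if (ret)
                return ret;

        for (i = 0; i < bbio->num_stripes; i++)
                pr_debug("stripe %d at physical %llu\n", i,
                         bbio->stripes[i].physical);

        btrfs_put_bbio(bbio);
        return 0;
}
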
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
                                      int index)
{
        atomic_inc(dev->dev_stat_values + index);
        /*
         * This memory barrier orders stores updating statistics before stores
         * updating dev_stats_ccnt.
         *
         * It pairs with smp_rmb() in btrfs_run_dev_stats().
         */
        smp_mb__before_atomic();
        atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
                                      int index)
{
        return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
                                                int index)
{
        int ret;

        ret = atomic_xchg(dev->dev_stat_values + index, 0);
        /*
         * atomic_xchg implies a full memory barrier as per atomic_t.txt:
         * - RMW operations that have a return value are fully ordered;
         *
         * This implicit memory barrier is paired with the smp_rmb() in
         * btrfs_run_dev_stats().
         */
        atomic_inc(&dev->dev_stats_ccnt);
        return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
                                      int index, unsigned long val)
{
        atomic_set(dev->dev_stat_values + index, val);
        /*
         * This memory barrier orders stores updating statistics before stores
         * updating dev_stats_ccnt.
         *
         * It pairs with smp_rmb() in btrfs_run_dev_stats().
         */
        smp_mb__before_atomic();
        atomic_inc(&dev->dev_stats_ccnt);
}

static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
                                        int index)
{
        btrfs_dev_stat_set(dev, index, 0);
}

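/*
 * Usage sketch, not part of the original header: recording and reading a
 * device error counter, assuming BTRFS_DEV_STAT_WRITE_ERRS from
 * enum btrfs_dev_stat_values in the btrfs uapi header.  The helper name
 * is hypothetical.
 */
static inline void btrfs_example_record_write_err(struct btrfs_device *dev)
{
        btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
        pr_debug("write errors so far: %d\n",
                 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS));
}
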
/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
        if (flags & BTRFS_BLOCK_GROUP_RAID10)
                return BTRFS_RAID_RAID10;
        else if (flags & BTRFS_BLOCK_GROUP_RAID1)
                return BTRFS_RAID_RAID1;
        else if (flags & BTRFS_BLOCK_GROUP_DUP)
                return BTRFS_RAID_DUP;
        else if (flags & BTRFS_BLOCK_GROUP_RAID0)
                return BTRFS_RAID_RAID0;
        else if (flags & BTRFS_BLOCK_GROUP_RAID5)
                return BTRFS_RAID_RAID5;
        else if (flags & BTRFS_BLOCK_GROUP_RAID6)
                return BTRFS_RAID_RAID6;

        return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

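/*
 * Usage sketch, not part of the original header: converting block group
 * flags to an index into btrfs_raid_array[] to query a property of the
 * raid profile.  The helper name is hypothetical.
 */
static inline int btrfs_example_data_copies(u64 bg_flags)
{
        enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(bg_flags);

        return btrfs_raid_array[index].ncopies;
}
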
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
                                        struct btrfs_device *failing_dev);

int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);

#endif