linux/drivers/staging/erofs/internal.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/internal.h
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __INTERNAL_H
#define __INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)   pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)  pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep         might_sleep
#define DBG_BUGON               BUG_ON
#else
#define debugln(x, ...)         ((void)0)

#define dbg_might_sleep()       ((void)0)
#define DBG_BUGON(x)            ((void)(x))
#endif

enum {
        FAULT_KMALLOC,
        FAULT_READ_IO,
        FAULT_MAX,
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
extern const char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
        atomic_t inject_ops;
        unsigned int inject_rate;
        unsigned int inject_type;
};
#endif

#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL  (2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL  (1)
#else
#define EROFS_FS_ZIP_CACHE_LVL  (0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif

/* EROFS_SUPER_MAGIC_V1 is used to represent the whole file system */
#define EROFS_SUPER_MAGIC   EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;

struct erofs_sb_info {
        /* list for all registered superblocks, mainly for shrinker */
        struct list_head list;
        struct mutex umount_mutex;

        u32 blocks;
        u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
        u32 xattr_blkaddr;
#endif

        /* inode slot unit size, as a bit shift */
        unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
        /* cluster size, as a bit shift */
        unsigned char clusterbits;

        /* the dedicated workstation for compression */
        struct radix_tree_root workstn_tree;

        /* threshold for synchronous decompression */
        unsigned int max_sync_decompress_pages;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
        struct inode *managed_cache;
#endif

#endif

        u32 build_time_nsec;
        u64 build_time;

        /* what we really care about is the nid rather than the ino */
        erofs_nid_t root_nid;
        /* used for statfs, f_files - f_favail */
        u64 inos;

        u8 uuid[16];                    /* 128-bit uuid for volume */
        u8 volume_name[16];             /* volume name */
        u32 requirements;

        char *dev_name;

        unsigned int mount_opt;
        unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
        struct erofs_fault_info fault_info;     /* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)                                 \
        infoln("inject %s in %s of %pS", erofs_fault_name[type],        \
                __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
        struct erofs_fault_info *ffi = &sbi->fault_info;

        if (!ffi->inject_rate)
                return false;

        if (!IS_FAULT_SET(ffi, type))
                return false;

        atomic_inc(&ffi->inject_ops);
        if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
                atomic_set(&ffi->inject_ops, 0);
                return true;
        }
        return false;
}
#else
static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
        return false;
}

static inline void erofs_show_injection_info(int type)
{
}
#endif

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
                                  size_t size, gfp_t flags)
{
        if (time_to_inject(sbi, FAULT_KMALLOC)) {
                erofs_show_injection_info(FAULT_KMALLOC);
                return NULL;
        }
        return kmalloc(size, flags);
}

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER          0x00000010
#define EROFS_MOUNT_POSIX_ACL           0x00000020
#define EROFS_MOUNT_FAULT_INJECTION     0x00000040

#define clear_opt(sbi, option)  ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)    ((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)   ((sbi)->mount_opt & EROFS_MOUNT_##option)
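
/*
 * Illustrative sketch (not part of the driver): how the option helpers
 * above are meant to be used, e.g. when applying defaults at mount time.
 * The helper name and the chosen defaults are assumptions for
 * demonstration; the real defaults live in super.c and depend on
 * Kconfig. test_opt()/clear_opt() follow the same pattern.
 */
static inline void erofs_apply_default_opts_example(struct erofs_sb_info *sbi)
{
        /* hypothetical defaults for demonstration only */
        set_opt(sbi, XATTR_USER);
        set_opt(sbi, POSIX_ACL);
}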

#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi)         xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi)       xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
        /* the workgroup index in the workstation */
        pgoff_t index;

        /* overall workgroup reference count */
        atomic_t refcount;
};

#define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
                                                 int val)
{
        preempt_disable();
        if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
                preempt_enable();
                return false;
        }
        return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
                                            int orig_val)
{
        /*
         * other observers should notice all modifications
         * in the freezing period.
         */
        smp_mb();
        atomic_set(&grp->refcount, orig_val);
        preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
        return atomic_cond_read_relaxed(&grp->refcount,
                                        VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
                                                 int val)
{
        preempt_disable();
        /* no need to spin on UP platforms, let's just disable preemption. */
        if (val != atomic_read(&grp->refcount)) {
                preempt_enable();
                return false;
        }
        return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
                                            int orig_val)
{
        preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
        int v = atomic_read(&grp->refcount);

        /* workgroups are never frozen on uniprocessor systems */
        DBG_BUGON(v == EROFS_LOCKED_MAGIC);
        return v;
}
#endif
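
/*
 * Illustrative sketch (not part of the driver) of the calling pattern
 * for the freeze helpers above: snapshot the reference count, try to
 * freeze the workgroup at that value, do the work, then unfreeze with
 * the original value. The helper name and the bail-out-on-contention
 * policy are assumptions for demonstration.
 */
static inline bool erofs_workgroup_freeze_example(struct erofs_workgroup *grp)
{
        int refcnt = atomic_read(&grp->refcount);

        /* another CPU may already hold the "bitlock"; simply give up here */
        if (refcnt == EROFS_LOCKED_MAGIC ||
            !erofs_workgroup_try_to_freeze(grp, refcnt))
                return false;

        /* ... safely inspect or update workgroup internals here ... */

        erofs_workgroup_unfreeze(grp, refcnt);
        return true;
}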

int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
                                             pgoff_t index, bool *tag);
int erofs_register_workgroup(struct super_block *sb,
                             struct erofs_workgroup *grp, bool tag);
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                       unsigned long nr_shrink, bool cleanup);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
                                       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct address_space *mapping,
                                  struct page *page);

#define MNGD_MAPPING(sbi)       ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
                                         struct page *page)
{
        return page->mapping == MNGD_MAPPING(sbi);
}
#else
#define MNGD_MAPPING(sbi)       (NULL)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
                                         struct page *page) { return false; }
#endif

#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES       3

static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
                                                     unsigned int nr)
{
        return nr <= sbi->max_sync_decompress_pages;
}

int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
#else
/* dummy initializer/finalizer for the decompression subsystem */
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
#endif

/* block size strictly follows PAGE_SIZE; buffer heads are not used yet */
#define LOG_BLOCK_SIZE          PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK   (PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK       (1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ            (1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb)            ((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES       (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi)         ((1 << (sbi)->clusterbits) / PAGE_SIZE)

#define EROFS_PCPUBUF_NR_PAGES          Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PCPUBUF_NR_PAGES          0
#endif

typedef u64 erofs_off_t;

/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
        return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
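
/*
 * Illustrative sketch (not part of the driver): callers usually split
 * the byte address returned by iloc() into a metadata block number and
 * an in-block offset before reading the on-disk inode. The helper name
 * below is hypothetical.
 */
static inline erofs_blk_t erofs_iloc_blknr_example(struct erofs_sb_info *sbi,
                                                   erofs_nid_t nid,
                                                   unsigned int *ofs)
{
        const erofs_off_t addr = iloc(sbi, nid);

        *ofs = erofs_blkoff(addr);
        return erofs_blknr(addr);
}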

/* atomic flag definitions */
#define EROFS_V_EA_INITED_BIT   0
#define EROFS_V_Z_INITED_BIT    1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_V_BL_XATTR_BIT    (BITS_PER_LONG - 1)
#define EROFS_V_BL_Z_BIT        (BITS_PER_LONG - 2)

struct erofs_vnode {
        erofs_nid_t nid;

        /* atomic flags (including bitlocks) */
        unsigned long flags;

        unsigned char datamode;
        unsigned char inode_isize;
        unsigned short xattr_isize;

        unsigned int xattr_shared_count;
        unsigned int *xattr_shared_xattrs;

        union {
                erofs_blk_t raw_blkaddr;
#ifdef CONFIG_EROFS_FS_ZIP
                struct {
                        unsigned short z_advise;
                        unsigned char  z_algorithmtype[2];
                        unsigned char  z_logical_clusterbits;
                        unsigned char  z_physical_clusterbits[2];
                };
#endif
        };
        /* the corresponding vfs inode */
        struct inode vfs_inode;
};

#define EROFS_V(ptr)    \
        container_of(ptr, struct erofs_vnode, vfs_inode)

#define __inode_advise(x, bit, bits) \
        (((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise) \
        __inode_advise(advise, EROFS_I_VERSION_BIT,     \
                EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)    \
        __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
                EROFS_I_DATA_MAPPING_BITS)

static inline unsigned long inode_datablocks(struct inode *inode)
{
        /* since i_size cannot be changed */
        return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
        return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode);
}

static inline bool is_inode_flat_inline(struct inode *inode)
{
        return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE;
}

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Unlike other file systems, it is used in 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid logical start address (m_la, usually
 * block-aligned) and get back the physical address (m_pa) plus the
 * longest valid extent length (m_plen, in bytes).
 *
 * Note that m_la in the RAW access mode refers to an offset in the
 * compressed on-disk data rather than in the uncompressed in-memory
 * data for a compressed file.
 *
 * m_pa keeps the same in-block offset as m_la except for the inline
 * data page.
 *
 * 2) Normal access mode:
 *
 * For an uncompressed inode there is no difference from the RAW
 * access mode. For a compressed inode, users pass a valid m_la and
 * get back the m_pa and m_plen needed to read the compressed data,
 * together with the updated m_la and m_llen which indicate the start
 * and length of the corresponding uncompressed extent in the file.
 */
enum {
        BH_Zipped = BH_PrivateStart,
        BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED        (1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META          (1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED        (1 << BH_Zipped)
/* The mapped extent length is fully determined */
#define EROFS_MAP_FULL_MAPPED   (1 << BH_FullMapped)

struct erofs_map_blocks {
        erofs_off_t m_pa, m_la;
        u64 m_plen, m_llen;

        unsigned int m_flags;

        struct page *mpage;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW    0x0001

/* zmap.c */
#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
                            struct erofs_map_blocks *map,
                            int flags);
#else
static inline int z_erofs_fill_inode(struct inode *inode) { return -ENOTSUPP; }
static inline int z_erofs_map_blocks_iter(struct inode *inode,
                                          struct erofs_map_blocks *map,
                                          int flags)
{
        return -ENOTSUPP;
}
#endif

/* data.c */

/*
 * Allocate a bio for the requested number of pages; on allocation
 * failure, retry with progressively fewer pages and, for the final
 * single-page attempt, honour @nofail via __GFP_NOFAIL.
 */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
               erofs_blk_t blkaddr, unsigned int nr_pages, void *bi_private,
               bio_end_io_t endio, bool nofail)
{
        const gfp_t gfp = GFP_NOIO;
        struct bio *bio;

        do {
                if (nr_pages == 1) {
                        bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
                        if (unlikely(!bio)) {
                                DBG_BUGON(nofail);
                                return ERR_PTR(-ENOMEM);
                        }
                        break;
                }
                bio = bio_alloc(gfp, nr_pages);
                nr_pages /= 2;
        } while (unlikely(!bio));

        bio->bi_end_io = endio;
        bio_set_dev(bio, sb->s_bdev);
        bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
        bio->bi_private = bi_private;
        return bio;
}

static inline void __submit_bio(struct bio *bio, unsigned int op,
                                unsigned int op_flags)
{
        bio_set_op_attrs(bio, op, op_flags);
        submit_bio(bio);
}
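
/*
 * Illustrative sketch (not part of the driver) combining the two
 * helpers above: allocate a one-page read bio for @blkaddr, attach
 * @page and submit it. The helper name, endio callback plumbing and
 * failure policy are assumptions for demonstration.
 */
static inline int erofs_submit_page_read_example(struct super_block *sb,
                                                 erofs_blk_t blkaddr,
                                                 struct page *page,
                                                 bio_end_io_t endio)
{
        struct bio *bio = erofs_grab_bio(sb, blkaddr, 1, NULL, endio, false);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        /* a freshly grabbed one-page bio must have room for one page */
        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                bio_put(bio);
                return -EIO;
        }
        __submit_bio(bio, REQ_OP_READ, 0);
        return 0;
}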

#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
#define EROFS_IO_MAX_RETRIES_NOFAIL     0
#else
#define EROFS_IO_MAX_RETRIES_NOFAIL     CONFIG_EROFS_FS_IO_MAX_RETRIES
#endif

struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
                                   bool prio, bool nofail);

static inline struct page *erofs_get_meta_page(struct super_block *sb,
                                               erofs_blk_t blkaddr, bool prio)
{
        return __erofs_get_meta_page(sb, blkaddr, prio, false);
}

static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
                                                      erofs_blk_t blkaddr,
                                                      bool prio)
{
        return __erofs_get_meta_page(sb, blkaddr, prio, true);
}

int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map,
                     int flags);
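
/*
 * Illustrative sketch (not part of the driver) of the RAW access mode
 * described above: map the extent covering logical byte offset @la and
 * hand back its physical byte address through @pa. The helper name and
 * the simplified error handling are assumptions for demonstration.
 */
static inline int erofs_map_raw_example(struct inode *inode, erofs_off_t la,
                                        erofs_off_t *pa)
{
        struct erofs_map_blocks map = { .m_la = la };
        int err;

        err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
        if (err)
                return err;

        /* release the cached metadata page if the mapping code grabbed one */
        if (map.mpage)
                put_page(map.mpage);

        /* an in-bounds offset of a valid image always has a disk mapping */
        if (!(map.m_flags & EROFS_MAP_MAPPED))
                return -EIO;
        *pa = map.m_pa;
        return 0;
}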

static inline struct page *
erofs_get_inline_page(struct inode *inode,
                      erofs_blk_t blkaddr)
{
        return erofs_get_meta_page(inode->i_sb,
                blkaddr, S_ISDIR(inode->i_mode));
}

/* inode.c */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
        return (nid >> 32) ^ (nid & 0xffffffff);
#else
        return nid;
#endif
}

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

static inline void set_inode_fast_symlink(struct inode *inode)
{
        inode->i_op = &erofs_fast_symlink_iops;
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
        return inode->i_op == &erofs_fast_symlink_iops;
}

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(const struct path *path, struct kstat *stat,
                  u32 request_mask, unsigned int query_flags);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
                erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;

static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        int i = 0;

        while (1) {
                void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

                /* retry twice more (3 attempts in total) */
                if (addr || ++i >= 3)
                        return addr;
                vm_unmap_aliases();
        }
#else
        return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}

static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        vm_unmap_ram(mem, count);
#else
        vunmap(mem);
#endif
}
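
/*
 * Illustrative sketch (not part of the driver): typical pairing of the
 * helpers above. Map @count pages into a virtually contiguous buffer,
 * operate on it, then unmap with the same page count (vm_unmap_ram()
 * requires it; vunmap() simply ignores it).
 */
static inline void erofs_vmap_usage_example(struct page **pages,
                                            unsigned int count)
{
        void *addr = erofs_vmap(pages, count);

        if (!addr)
                return;
        /* ... access the pages through the linear mapping at addr ... */
        erofs_vunmap(addr, count);
}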

/* utils.c */
extern struct shrinker erofs_shrinker_info;

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);
#define erofs_put_pcpubuf(buf) do { \
        (void)&(buf);   \
        preempt_enable();       \
} while (0)
#else
static inline void *erofs_get_pcpubuf(unsigned int pagenr)
{
        return ERR_PTR(-ENOTSUPP);
}

#define erofs_put_pcpubuf(buf) do {} while (0)
#endif
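
/*
 * Illustrative sketch (not part of the driver) of the per-CPU buffer
 * contract implied above: a successful erofs_get_pcpubuf() returns
 * with preemption disabled (which is why erofs_put_pcpubuf() ends in
 * preempt_enable()), so no sleeping is allowed in between. Page number
 * 0 below is an arbitrary example.
 */
static inline void erofs_pcpubuf_usage_example(void)
{
        void *buf = erofs_get_pcpubuf(0);

        if (IS_ERR(buf))
                return;
        /* ... use up to EROFS_PCPUBUF_NR_PAGES pages without sleeping ... */
        erofs_put_pcpubuf(buf);
}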

void erofs_register_super(struct super_block *sb);
void erofs_unregister_super(struct super_block *sb);

#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
#endif

#endif /* __INTERNAL_H */