// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/block_validity.c
 *
 * Copyright (C) 2009
 * Theodore Ts'o (tytso@mit.edu)
 *
 * Track which blocks in the filesystem are metadata blocks that
 * should never be used as data blocks by files or directories.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "ext4.h"

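/*
 * One node in the per-filesystem red-black tree of metadata block
 * ranges ("system zones"); keyed by the starting block number.
 */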
struct ext4_system_zone {
        struct rb_node  node;
        ext4_fsblk_t    start_blk;
        unsigned int    count;
};

static struct kmem_cache *ext4_system_zone_cachep;

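/*
 * Create the slab cache used to allocate ext4_system_zone entries;
 * called once from the filesystem initialization path.
 */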
int __init ext4_init_system_zone(void)
{
        ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
        if (ext4_system_zone_cachep == NULL)
                return -ENOMEM;
        return 0;
}

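/* Tear down the slab cache created by ext4_init_system_zone(). */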
void ext4_exit_system_zone(void)
{
        kmem_cache_destroy(ext4_system_zone_cachep);
}

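/*
 * Two system zones can be merged if the first one ends exactly where
 * the second one begins.
 */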
static inline int can_merge(struct ext4_system_zone *entry1,
                            struct ext4_system_zone *entry2)
{
        if ((entry1->start_blk + entry1->count) == entry2->start_blk)
                return 1;
        return 0;
}

/*
 * Mark a range of blocks as belonging to the "system zone" --- that
 * is, filesystem metadata blocks which should never be used by
 * inodes.
 */
static int add_system_zone(struct ext4_sb_info *sbi,
                           ext4_fsblk_t start_blk,
                           unsigned int count)
{
        struct ext4_system_zone *new_entry = NULL, *entry;
        struct rb_node **n = &sbi->system_blks.rb_node, *node;
        struct rb_node *parent = NULL, *new_node = NULL;

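        /*
         * Walk the tree looking for an existing zone that overlaps the
         * new range.  If one is found, extend it in place and reuse its
         * node instead of allocating a new one.
         */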
        while (*n) {
                parent = *n;
                entry = rb_entry(parent, struct ext4_system_zone, node);
                if (start_blk < entry->start_blk)
                        n = &(*n)->rb_left;
                else if (start_blk >= (entry->start_blk + entry->count))
                        n = &(*n)->rb_right;
                else {
                        if (start_blk + count > (entry->start_blk +
                                                 entry->count))
                                entry->count = (start_blk + count -
                                                entry->start_blk);
                        new_node = *n;
                        new_entry = rb_entry(new_node, struct ext4_system_zone,
                                             node);
                        break;
                }
        }

        if (!new_entry) {
                new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
                                             GFP_KERNEL);
                if (!new_entry)
                        return -ENOMEM;
                new_entry->start_blk = start_blk;
                new_entry->count = count;
                new_node = &new_entry->node;

                rb_link_node(new_node, parent, n);
                rb_insert_color(new_node, &sbi->system_blks);
        }

        /* Can we merge to the left? */
        node = rb_prev(new_node);
        if (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                if (can_merge(entry, new_entry)) {
                        new_entry->start_blk = entry->start_blk;
                        new_entry->count += entry->count;
                        rb_erase(node, &sbi->system_blks);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }

        /* Can we merge to the right? */
        node = rb_next(new_node);
        if (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                if (can_merge(new_entry, entry)) {
                        new_entry->count += entry->count;
                        rb_erase(node, &sbi->system_blks);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }
        return 0;
}

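/*
 * Dump the current set of system zones to the kernel log as a list of
 * "start-end" block ranges; used when the filesystem is mounted with
 * the debug option.
 */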
static void debug_print_tree(struct ext4_sb_info *sbi)
{
        struct rb_node *node;
        struct ext4_system_zone *entry;
        int first = 1;

        printk(KERN_INFO "System zones: ");
        node = rb_first(&sbi->system_blks);
        while (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
                       entry->start_blk, entry->start_blk + entry->count - 1);
                first = 0;
                node = rb_next(node);
        }
        printk(KERN_CONT "\n");
}

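/*
 * Build the red-black tree of system zones for this filesystem: the
 * superblock backups and group descriptor blocks, plus each group's
 * block bitmap, inode bitmap and inode table.  This is only done when
 * the block_validity mount option is enabled; otherwise any existing
 * tree is released.
 */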
int ext4_setup_system_zone(struct super_block *sb)
{
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        int flex_size = ext4_flex_bg_size(sbi);
        int ret;

        if (!test_opt(sb, BLOCK_VALIDITY)) {
                if (EXT4_SB(sb)->system_blks.rb_node)
                        ext4_release_system_zone(sb);
                return 0;
        }
        if (EXT4_SB(sb)->system_blks.rb_node)
                return 0;

        for (i = 0; i < ngroups; i++) {
                if (ext4_bg_has_super(sb, i) &&
                    ((i < 5) || ((i % flex_size) == 0))) {
                        ret = add_system_zone(sbi,
                                        ext4_group_first_block_no(sb, i),
                                        ext4_bg_num_gdb(sb, i) + 1);
                        if (ret)
                                return ret;
                }
                gdp = ext4_get_group_desc(sb, i, NULL);
                ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
                if (ret)
                        return ret;
                ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
                if (ret)
                        return ret;
                ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
                                sbi->s_itb_per_group);
                if (ret)
                        return ret;
        }

        if (test_opt(sb, DEBUG))
                debug_print_tree(EXT4_SB(sb));
        return 0;
}

/* Called when the filesystem is unmounted */
void ext4_release_system_zone(struct super_block *sb)
{
        struct ext4_system_zone *entry, *n;

        rbtree_postorder_for_each_entry_safe(entry, n,
                        &EXT4_SB(sb)->system_blks, node)
                kmem_cache_free(ext4_system_zone_cachep, entry);

        EXT4_SB(sb)->system_blks = RB_ROOT;
}

/*
 * Returns 1 if the passed-in block region [start_blk, start_blk+count)
 * is valid; returns 0 if any part of the region overlaps with
 * filesystem metadata blocks or lies outside the valid range of the
 * filesystem.
 */
int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
                          unsigned int count)
{
        struct ext4_system_zone *entry;
        struct rb_node *n = sbi->system_blks.rb_node;

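        /*
         * Reject ranges that start at or before the first data block
         * (i.e. in the superblock area), that wrap around, or that
         * extend past the end of the filesystem.
         */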
        if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
            (start_blk + count < start_blk) ||
            (start_blk + count > ext4_blocks_count(sbi->s_es))) {
                sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
                return 0;
        }
        while (n) {
                entry = rb_entry(n, struct ext4_system_zone, node);
                if (start_blk + count - 1 < entry->start_blk)
                        n = n->rb_left;
                else if (start_blk >= (entry->start_blk + entry->count))
                        n = n->rb_right;
                else {
                        sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
                        return 0;
                }
        }
        return 1;
}

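/*
 * Verify an array of __le32 block references: every non-zero entry
 * must refer to a valid data block.  Returns 0 if all references are
 * valid, or -EFSCORRUPTED (after logging an error against the inode)
 * if any reference points at metadata or lies outside the filesystem.
 */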
int ext4_check_blockref(const char *function, unsigned int line,
                        struct inode *inode, __le32 *p, unsigned int max)
{
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
        __le32 *bref = p;
        unsigned int blk;

        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
                    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                    blk, 1))) {
                        es->s_last_error_block = cpu_to_le64(blk);
                        ext4_error_inode(inode, function, line, blk,
                                         "invalid block");
                        return -EFSCORRUPTED;
                }
        }
        return 0;
}