linux/fs/ext4/mmp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/utsname.h>
#include <linux/kthread.h>

#include "ext4.h"

/* Checksumming functions */
static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct mmp_struct, mmp_checksum);
        __u32 csum;

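        /*
         * Checksum the mmp_struct up to, but not including, the
         * mmp_checksum field, using the per-filesystem checksum seed.
         */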
        csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);

        return cpu_to_le32(csum);
}

static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
}

static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
{
        if (!ext4_has_metadata_csum(sb))
                return;

        mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
}

/*
 * Write the MMP block using REQ_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
        struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

        /*
         * We protect against freezing so that we don't create dirty buffers
         * on a frozen filesystem.
         */
        sb_start_write(sb);
        ext4_mmp_csum_set(sb, mmp);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        sb_end_write(sb);
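        /*
         * The submit/wait above is synchronous, so a buffer that is not
         * uptodate at this point means the write did not make it to disk.
         */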
        if (unlikely(!buffer_uptodate(bh)))
                return 1;

        return 0;
}

/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
                          ext4_fsblk_t mmp_block)
{
        struct mmp_struct *mmp;
        int ret;

        if (*bh)
                clear_buffer_uptodate(*bh);

        /* This would be sb_bread(sb, mmp_block), except we need to be sure
         * that the MD RAID device cache has been bypassed, and that the read
         * is not blocked in the elevator. */
        if (!*bh) {
                *bh = sb_getblk(sb, mmp_block);
                if (!*bh) {
                        ret = -ENOMEM;
                        goto warn_exit;
                }
        }

        get_bh(*bh);
        lock_buffer(*bh);
        (*bh)->b_end_io = end_buffer_read_sync;
        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
        wait_on_buffer(*bh);
        if (!buffer_uptodate(*bh)) {
                ret = -EIO;
                goto warn_exit;
        }
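        /*
         * Sanity-check the contents: the block must carry the MMP magic
         * and, if metadata checksums are enabled, a valid checksum.
         */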
        mmp = (struct mmp_struct *)((*bh)->b_data);
        if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
                ret = -EFSCORRUPTED;
                goto warn_exit;
        }
        if (!ext4_mmp_csum_verify(sb, mmp)) {
                ret = -EFSBADCRC;
                goto warn_exit;
        }
        return 0;
warn_exit:
        brelse(*bh);
        *bh = NULL;
        ext4_warning(sb, "Error %d while reading MMP block %llu",
                     ret, mmp_block);
        return ret;
}

/*
 * Dump as much information as possible to help the admin.
 */
void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
                    const char *function, unsigned int line, const char *msg)
{
        __ext4_warning(sb, function, line, "%s", msg);
        __ext4_warning(sb, function, line,
                       "MMP failure info: last update time: %llu, last update "
                       "node: %s, last update device: %s",
                       (long long unsigned int) le64_to_cpu(mmp->mmp_time),
                       mmp->mmp_nodename, mmp->mmp_bdevname);
}

/*
 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
 */
static int kmmpd(void *data)
{
        struct super_block *sb = ((struct mmpd_data *) data)->sb;
        struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct mmp_struct *mmp;
        ext4_fsblk_t mmp_block;
        u32 seq = 0;
        unsigned long failed_writes = 0;
        int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
        unsigned mmp_check_interval;
        unsigned long last_update_time;
        unsigned long diff;
        int retval;

        mmp_block = le64_to_cpu(es->s_mmp_block);
        mmp = (struct mmp_struct *)(bh->b_data);
        mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
        /*
         * Start with the higher mmp_check_interval and reduce it if
         * the MMP block is being updated on time.
         */
        mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
                                 EXT4_MMP_MIN_CHECK_INTERVAL);
        mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
        bdevname(bh->b_bdev, mmp->mmp_bdevname);

        memcpy(mmp->mmp_nodename, init_utsname()->nodename,
               sizeof(mmp->mmp_nodename));

        while (!kthread_should_stop()) {
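                /*
                 * Wrap back to 1: values above EXT4_MMP_SEQ_MAX are
                 * reserved for the special clean-unmount and fsck markers.
                 */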
                if (++seq > EXT4_MMP_SEQ_MAX)
                        seq = 1;

                mmp->mmp_seq = cpu_to_le32(seq);
                mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
                last_update_time = jiffies;

                retval = write_mmp_block(sb, bh);
                /*
                 * Don't spew too many error messages. Print one every
                 * (s_mmp_update_interval * 60) seconds.
                 */
                if (retval) {
                        if ((failed_writes % 60) == 0)
                                ext4_error(sb, "Error writing to MMP block");
                        failed_writes++;
                }

                if (!(le32_to_cpu(es->s_feature_incompat) &
                    EXT4_FEATURE_INCOMPAT_MMP)) {
                        ext4_warning(sb, "kmmpd being stopped since MMP feature"
                                     " has been disabled.");
                        goto exit_thread;
                }

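                /*
                 * If the filesystem has gone read-only, stop updating the
                 * block; the clean sequence is still written below before
                 * the thread exits.
                 */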
                if (sb_rdonly(sb))
                        break;

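                /*
                 * Sleep for whatever is left of the update interval,
                 * measured from just before the MMP block was written.
                 */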
                diff = jiffies - last_update_time;
                if (diff < mmp_update_interval * HZ)
                        schedule_timeout_interruptible(mmp_update_interval *
                                                       HZ - diff);

                /*
                 * We need to make sure that more than mmp_check_interval
                 * seconds have not passed since writing. If that has happened
                 * we need to check if the MMP block is as we left it.
                 */
                diff = jiffies - last_update_time;
                if (diff > mmp_check_interval * HZ) {
                        struct buffer_head *bh_check = NULL;
                        struct mmp_struct *mmp_check;

                        retval = read_mmp_block(sb, &bh_check, mmp_block);
                        if (retval) {
                                ext4_error(sb, "error reading MMP data: %d",
                                           retval);
                                goto exit_thread;
                        }

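                        /*
                         * Compare the in-memory copy with what is now on
                         * disk; any difference means another node has
                         * written the block behind our back.
                         */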
                        mmp_check = (struct mmp_struct *)(bh_check->b_data);
                        if (mmp->mmp_seq != mmp_check->mmp_seq ||
                            memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
                                   sizeof(mmp->mmp_nodename))) {
                                dump_mmp_msg(sb, mmp_check,
                                             "Error while updating MMP info. "
                                             "The filesystem seems to have been"
                                             " multiply mounted.");
                                ext4_error(sb, "abort");
                                put_bh(bh_check);
                                retval = -EBUSY;
                                goto exit_thread;
                        }
                        put_bh(bh_check);
                }

                /*
                 * Adjust the mmp_check_interval depending on how much time
                 * it took for the MMP block to be written.
                 */
                mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
                                             EXT4_MMP_MAX_CHECK_INTERVAL),
                                         EXT4_MMP_MIN_CHECK_INTERVAL);
                mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
        }

        /*
         * Unmount seems to be clean.
         */
        mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
        mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());

        retval = write_mmp_block(sb, bh);

exit_thread:
        EXT4_SB(sb)->s_mmp_tsk = NULL;
        kfree(data);
        brelse(bh);
        return retval;
}

/*
 * Get a random new sequence number but make sure it is not greater than
 * EXT4_MMP_SEQ_MAX.
 */
static unsigned int mmp_new_seq(void)
{
        u32 new_seq;

        do {
                new_seq = prandom_u32();
        } while (new_seq > EXT4_MMP_SEQ_MAX);

        return new_seq;
}

/*
 * Protect the filesystem from being mounted more than once.
 */
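/*
 * The check runs in two phases: first verify that the sequence number
 * already on disk stays unchanged for one wait interval (skipped if the
 * previous unmount was clean), then write a fresh random sequence number
 * and confirm that it too survives another interval before kmmpd is
 * started.
 */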
int ext4_multi_mount_protect(struct super_block *sb,
                                    ext4_fsblk_t mmp_block)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct buffer_head *bh = NULL;
        struct mmp_struct *mmp = NULL;
        struct mmpd_data *mmpd_data;
        u32 seq;
        unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
        unsigned int wait_time = 0;
        int retval;

        if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
            mmp_block >= ext4_blocks_count(es)) {
                ext4_warning(sb, "Invalid MMP block in superblock");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;

        mmp = (struct mmp_struct *)(bh->b_data);

        if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
                mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;

        /*
         * If check_interval in MMP block is larger, use that instead of
         * update_interval from the superblock.
         */
        if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
                mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);

        seq = le32_to_cpu(mmp->mmp_seq);
        if (seq == EXT4_MMP_SEQ_CLEAN)
                goto skip;

        if (seq == EXT4_MMP_SEQ_FSCK) {
                dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
                goto failed;
        }

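        /*
         * Wait roughly twice the check interval, but never more than the
         * check interval plus one minute.
         */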
        wait_time = min(mmp_check_interval * 2 + 1,
                        mmp_check_interval + 60);

        /* Print MMP interval if more than 20 secs. */
        if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
                ext4_warning(sb, "MMP interval %u higher than expected, please"
                             " wait.\n", wait_time * 2);

        if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
                ext4_warning(sb, "MMP startup interrupted, failing mount\n");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;
        mmp = (struct mmp_struct *)(bh->b_data);
        if (seq != le32_to_cpu(mmp->mmp_seq)) {
                dump_mmp_msg(sb, mmp,
                             "Device is already active on another node.");
                goto failed;
        }

skip:
        /*
         * write a new random sequence number.
         */
        seq = mmp_new_seq();
        mmp->mmp_seq = cpu_to_le32(seq);

        retval = write_mmp_block(sb, bh);
        if (retval)
                goto failed;

        /*
         * wait for MMP interval and check mmp_seq.
         */
        if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
                ext4_warning(sb, "MMP startup interrupted, failing mount");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;
        mmp = (struct mmp_struct *)(bh->b_data);
        if (seq != le32_to_cpu(mmp->mmp_seq)) {
                dump_mmp_msg(sb, mmp,
                             "Device is already active on another node.");
                goto failed;
        }

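        /*
         * Hand the buffer head to kmmpd via mmpd_data; once the thread is
         * running it owns the reference and releases it when it exits.
         */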
        mmpd_data = kmalloc(sizeof(*mmpd_data), GFP_KERNEL);
        if (!mmpd_data) {
                ext4_warning(sb, "not enough memory for mmpd_data");
                goto failed;
        }
        mmpd_data->sb = sb;
        mmpd_data->bh = bh;

        /*
         * Start a kernel thread to update the MMP block periodically.
         */
        EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
                                             bdevname(bh->b_bdev,
                                                      mmp->mmp_bdevname));
        if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
                EXT4_SB(sb)->s_mmp_tsk = NULL;
                kfree(mmpd_data);
                ext4_warning(sb, "Unable to create kmmpd thread for %s.",
                             sb->s_id);
                goto failed;
        }

        return 0;

failed:
        brelse(bh);
        return 1;
}