linux/fs/ext4/readpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */
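
/*
 * For context: ext4_mpage_readpages() is driven from the readpage and
 * readahead address_space operations in fs/ext4/inode.c, roughly along
 * these lines (simplified sketch only; the real callers also handle
 * inline data and tracing):
 *
 *    static int ext4_readpage(struct file *file, struct page *page)
 *    {
 *            return ext4_mpage_readpages(page->mapping->host, NULL, page);
 *    }
 *
 *    static void ext4_readahead(struct readahead_control *rac)
 *    {
 *            ext4_mpage_readpages(rac->mapping->host, rac, NULL);
 *    }
 *
 * That is, a single ->readpage call passes the locked page with rac == NULL,
 * while ->readahead passes rac and lets this code pull the locked pages out
 * of it one by one.
 */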

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
        STEP_MAX,
};

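/*
 * Per-bio state, hung off bio->bi_private, for reads whose data needs
 * postprocessing after the I/O completes.  enabled_steps is a bitmask of
 * bio_post_read_step values; cur_step tracks progress through those steps.
 */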
struct bio_post_read_ctx {
        struct bio *bio;
        struct work_struct work;
        unsigned int cur_step;
        unsigned int enabled_steps;
};

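/*
 * Complete the pages of a read bio: mark each page uptodate unless the I/O
 * or a post-read step failed, unlock it, then free the attached post-read
 * context (if any) and the bio itself.
 */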
static void __read_end_io(struct bio *bio)
{
        struct page *page;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page = bv->bv_page;

                /* PG_error was set if any post_read step failed */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                unlock_page(page);
        }
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

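/* Decrypt the bio's data via fscrypt, then move on to the next post-read step. */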
static void decrypt_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        fscrypt_decrypt_bio(ctx->bio);

        bio_post_read_processing(ctx);
}

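/*
 * Verify the bio's data against the file's fs-verity Merkle tree.  Verity is
 * always the last post-read step, so this also completes the bio.
 */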
static void verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        /*
         * fsverity_verify_bio() may call readpages() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
         * This is safe because verity is the last post-read step.
         */
        BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        fsverity_verify_bio(bio);

        __read_end_io(bio);
}

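/*
 * Advance to the next enabled post-read step for this bio and queue the
 * corresponding work item, or complete the bio if no steps remain.
 */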
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                        INIT_WORK(&ctx->work, decrypt_work);
                        fscrypt_enqueue_decrypt_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
                        fsverity_enqueue_verify_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        default:
                __read_end_io(ctx->bio);
        }
}

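/* True if the bio completed without error and has postprocessing attached. */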
static bool bio_post_read_required(struct bio *bio)
{
        return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        if (bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                ctx->cur_step = STEP_INITIAL;
                bio_post_read_processing(ctx);
                return;
        }
        __read_end_io(bio);
}

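/*
 * Return true if the page at index @idx needs fs-verity verification.
 * Pages beyond i_size are not verified; for ext4 they hold the verity
 * metadata (Merkle tree) rather than file data.
 */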
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

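/*
 * If the data read into @bio will need postprocessing (fscrypt decryption
 * and/or fs-verity verification), allocate a bio_post_read_ctx from the
 * mempool and attach it via bio->bi_private.
 */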
static void ext4_set_bio_post_read_ctx(struct bio *bio,
                                       const struct inode *inode,
                                       pgoff_t first_idx)
{
        unsigned int post_read_steps = 0;

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= 1 << STEP_DECRYPT;

        if (ext4_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                struct bio_post_read_ctx *ctx =
                        mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

                ctx->bio = bio;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }
}

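/*
 * Reads are normally capped at i_size, but for a verity file (or one that is
 * having verity enabled) reads past EOF must be allowed so that the Merkle
 * tree, which ext4 stores beyond i_size, can be paged in.
 */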
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_VERITY) &&
            (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
                return inode->i_sb->s_maxbytes;

        return i_size_read(inode);
}

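/*
 * ext4_mpage_readpages - read a single page or a readahead batch with as few
 * bios as possible.
 * @inode: inode whose data is being read
 * @rac:   readahead state, or NULL for a single-page ->readpage read
 * @page:  the locked page to read when @rac is NULL; ignored otherwise
 *
 * Contiguous, fully mapped runs of blocks are batched into large read bios;
 * trailing holes are zeroed in place; anything this code cannot handle
 * (buffers already attached, a non-hole after a hole, discontiguous blocks)
 * falls back to block_read_full_page() via the "confused" path.
 */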
int ext4_mpage_readpages(struct inode *inode,
                struct readahead_control *rac, struct page *page)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t next_block;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;
        unsigned int nr_pages = rac ? readahead_count(rac) : 1;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (; nr_pages; nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (rac) {
                        page = readahead_page(rac);
                        prefetchw(&page->flags);
                }

                if (page_has_buffers(page))
                        goto confused;

                block_in_file = next_block =
                        (sector_t)page->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (ext4_readpage_limit(inode) +
                                      blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this page.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
                                                          PAGE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
                                          PAGE_SIZE);
                        if (first_hole == 0) {
                                if (ext4_need_verity(inode, page->index) &&
                                    !fsverity_verify_page(page))
                                        goto set_error_page;
                                SetPageUptodate(page);
                                unlock_page(page);
                                goto next_page;
                        }
                } else if (fully_mapped) {
                        SetPageMappedToDisk(page);
                }
                if (fully_mapped && blocks_per_page == 1 &&
                    !PageUptodate(page) && cleancache_get_page(page) == 0) {
                        SetPageUptodate(page);
                        goto confused;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1 ||
                            !fscrypt_mergeable_bio(bio, inode, next_block))) {
                submit_and_realloc:
                        submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        /*
                         * bio_alloc will _always_ be able to allocate a bio if
                         * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                         */
                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, BIO_MAX_PAGES));
                        fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
                                                  GFP_KERNEL);
                        ext4_set_bio_post_read_ctx(bio, inode, page->index);
                        bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio_set_op_attrs(bio, REQ_OP_READ,
                                                rac ? REQ_RAHEAD : 0);
                }

                length = first_hole << blkbits;
                if (bio_add_page(bio, page, length, 0) < length)
                        goto submit_and_realloc;

                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
        confused:
                if (bio) {
                        submit_bio(bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
                        block_read_full_page(page, ext4_get_block);
                else
                        unlock_page(page);
        next_page:
                if (rac)
                        put_page(page);
        }
        if (bio)
                submit_bio(bio);
        return 0;
}

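/*
 * Create the slab cache and mempool backing bio_post_read_ctx allocation;
 * the preallocated pool is what lets ext4_set_bio_post_read_ctx() treat
 * mempool_alloc() as unfailing.
 */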
int __init ext4_init_post_read_processing(void)
{
        bio_post_read_ctx_cache =
                kmem_cache_create("ext4_bio_post_read_ctx",
                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
        if (!bio_post_read_ctx_cache)
                goto fail;
        bio_post_read_ctx_pool =
                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
                                         bio_post_read_ctx_cache);
        if (!bio_post_read_ctx_pool)
                goto fail_free_cache;
        return 0;

fail_free_cache:
        kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
        return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
        mempool_destroy(bio_post_read_ctx_pool);
        kmem_cache_destroy(bio_post_read_ctx_cache);
}