// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
        put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, or the error returned by @filler otherwise.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                readahead_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                put_page(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_SIZE);
        }
        return ret;
}
EXPORT_SYMBOL(read_cache_pages);
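
/*
 * Illustrative sketch (not part of this file): a filesystem that still
 * uses read_cache_pages() typically passes a filler that hands each page
 * to its own ->readpage() implementation.  The names below are
 * hypothetical.
 *
 *      static int myfs_filler(void *data, struct page *page)
 *      {
 *              struct file *file = data;
 *
 *              // ->readpage() reads the page and unlocks it on completion
 *              return file->f_mapping->a_ops->readpage(file, page);
 *      }
 *
 *      // pages: list of pages with ->index set, as described above
 *      err = read_cache_pages(mapping, &pages, myfs_filler, file);
 */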

static void read_pages(struct readahead_control *rac, struct list_head *pages,
                bool skip_page)
{
        const struct address_space_operations *aops = rac->mapping->a_ops;
        struct page *page;
        struct blk_plug plug;

        if (!readahead_count(rac))
                goto out;

        blk_start_plug(&plug);

        if (aops->readahead) {
                aops->readahead(rac);
                /* Clean up the remaining pages */
                while ((page = readahead_page(rac))) {
                        unlock_page(page);
                        put_page(page);
                }
        } else if (aops->readpages) {
                aops->readpages(rac->file, rac->mapping, pages,
                                readahead_count(rac));
                /* Clean up the remaining pages */
                put_pages_list(pages);
                rac->_index += rac->_nr_pages;
                rac->_nr_pages = 0;
        } else {
                while ((page = readahead_page(rac))) {
                        aops->readpage(rac->file, page);
                        put_page(page);
                }
        }

        blk_finish_plug(&plug);

        BUG_ON(!list_empty(pages));
        BUG_ON(readahead_count(rac));

out:
        if (skip_page)
                rac->_index++;
}
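
/*
 * Sketch (not part of this file) of how a filesystem's ->readahead()
 * typically consumes the readahead_control passed in above: it pulls
 * locked pages off the rac one by one, starts I/O, and leaves any pages
 * it did not consume for read_pages() to clean up.  The names below are
 * hypothetical.
 *
 *      static void myfs_readahead(struct readahead_control *rac)
 *      {
 *              struct page *page;
 *
 *              while ((page = readahead_page(rac))) {
 *                      // submit I/O; the completion handler unlocks the page
 *                      myfs_submit_read(page);
 *                      put_page(page);
 *              }
 *      }
 */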

/**
 * page_cache_readahead_unbounded - Start unchecked readahead.
 * @mapping: File address space.
 * @file: This instance of the open file; used for authentication.
 * @index: First page index to read.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Number of pages, counted back from the end of this
 *   window, at which to set the readahead marker (i.e. where the next
 *   readahead should be started).
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_readahead_unbounded(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        LIST_HEAD(page_pool);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
        struct readahead_control rac = {
                .mapping = mapping,
                .file = file,
                ._index = index,
        };
        unsigned long i;

        /*
         * Partway through the readahead operation, we will have added
         * locked pages to the page cache, but will not yet have submitted
         * them for I/O.  Adding another page may need to allocate memory,
         * which can trigger memory reclaim.  Telling the VM we're in
         * the middle of a filesystem operation will cause it to not
         * touch file-backed pages, preventing a deadlock.  Most (all?)
         * filesystems already specify __GFP_NOFS in their mapping's
         * gfp_mask, but let's be explicit here.
         */
        unsigned int nofs = memalloc_nofs_save();

        /*
         * Preallocate as many pages as we will need.
         */
        for (i = 0; i < nr_to_read; i++) {
                struct page *page = xa_load(&mapping->i_pages, index + i);

                BUG_ON(index + i != rac._index + rac._nr_pages);

                if (page && !xa_is_value(page)) {
                        /*
                         * Page already present?  Kick off the current batch
                         * of contiguous pages before continuing with the
                         * next batch.  This page may be the one we would
                         * have intended to mark as Readahead, but we don't
                         * have a stable reference to this page, and it's
                         * not worth getting one just for that.
                         */
                        read_pages(&rac, &page_pool, true);
                        continue;
                }

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        break;
                if (mapping->a_ops->readpages) {
                        page->index = index + i;
                        list_add(&page->lru, &page_pool);
                } else if (add_to_page_cache_lru(page, mapping, index + i,
                                        gfp_mask) < 0) {
                        put_page(page);
                        read_pages(&rac, &page_pool, true);
                        continue;
                }
                if (i == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                rac._nr_pages++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        read_pages(&rac, &page_pool, false);
        memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations were to cause VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void __do_page_cache_readahead(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        loff_t isize = i_size_read(inode);
        pgoff_t end_index;      /* The last page we want to read */

        if (isize == 0)
                return;

        end_index = (isize - 1) >> PAGE_SHIFT;
        if (index > end_index)
                return;
        /* Don't read past the page containing the last byte of the file */
        if (nr_to_read > end_index - index)
                nr_to_read = end_index - index + 1;

        page_cache_readahead_unbounded(mapping, file, index, nr_to_read,
                        lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t index, unsigned long nr_to_read)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        struct file_ra_state *ra = &filp->f_ra;
        unsigned long max_pages;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
                        !mapping->a_ops->readahead))
                return;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min(nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                __do_page_cache_readahead(mapping, filp, index, this_chunk, 0);

                index += this_chunk;
                nr_to_read -= this_chunk;
        }
}

/*
 * Set the initial window size, round to next power of 2 and scale up:
 * x 4 for a small request, x 2 for a medium one, and clamp at max for
 * a large one.  For a 128k (32 page) max ra this gives:
 * 1-2 page read = 16k initial, 3-4 page = 32k, 5-8 page = 64k,
 * > 8 page = 128k initial.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;

        if (cur < max / 16)
                return 4 * cur;
        if (cur <= max / 2)
                return 2 * cur;
        return max;
}
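
/*
 * Worked example (a sketch, assuming the default 128k / 32 page window):
 * a 4 page first read gets an 8 page initial window from
 * get_init_ra_size(); subsequent sequential hits then ramp it via
 * get_next_ra_size(): 8 -> 16 -> 32, after which the window stays
 * clamped at max.
 */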

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
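
/*
 * Sketch of the resulting pipeline, assuming a 4 page sequential read
 * pattern and the default 32 page window (numbers derived from the code
 * in this file, not a measured trace):
 *
 *      - miss at index 0: sync readahead submits pages 0-7
 *        (initial window: start=0, size=8, async_size=4, page 4 marked)
 *      - the read of marked page 4 triggers async readahead of pages 8-23
 *        (start=8, size=16, async_size=16, page 8 marked)
 *      - the read of marked page 8 triggers async readahead of pages 24-55
 *        (start=24, size=32, async_size=32), and the window stays at max.
 */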

/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimate of
 *      - the length of the sequential read sequence, or
 *      - the thrashing threshold in memory-tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   pgoff_t index, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = page_cache_prev_miss(mapping, index - 1, max);
        rcu_read_unlock();

        return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t index,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, index, max);

        /*
         * not enough history pages:
         * it could be a random read
         */
        if (size <= req_size)
                return 0;

        /*
         * starts from the beginning of the file:
         * a strong indication of a long-running stream (or a whole-file read)
         */
        if (size >= index)
                size *= 2;

        ra->start = index;
        ra->size = min(size + req_size, max);
        ra->async_size = 1;

        return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *filp,
                bool hit_readahead_marker, pgoff_t index,
                unsigned long req_size)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        pgoff_t prev_index;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        if (req_size > max_pages && bdi->io_pages > max_pages)
                max_pages = min(req_size, bdi->io_pages);

        /*
         * start of file
         */
        if (!index)
                goto initial_readahead;

        /*
         * It's the expected callback index, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((index == (ra->start + ra->size - ra->async_size) ||
             index == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = page_cache_next_miss(mapping, index + 1, max_pages);
                rcu_read_unlock();

                if (!start || start - index > max_pages)
                        return;

                ra->start = start;
                ra->size = start - index;       /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max_pages)
                goto initial_readahead;

        /*
         * sequential cache miss
         * trivial case: (index - prev_index) == 1
         * unaligned reads: (index - prev_index) == 0
         */
        prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (index - prev_index <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
        if (try_context_readahead(mapping, ra, index, req_size, max_pages))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        __do_page_cache_readahead(mapping, filp, index, req_size, 0);
        return;

initial_readahead:
        ra->start = index;
        ra->size = get_init_ra_size(req_size, max_pages);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         * Take care of maximum IO pages as above.
         */
        if (index == ra->start && ra->size == ra->async_size) {
                add_pages = get_next_ra_size(ra, max_pages);
                if (ra->size + add_pages <= max_pages) {
                        ra->async_size = add_pages;
                        ra->size += add_pages;
                } else {
                        ra->size = max_pages;
                        ra->async_size = max_pages >> 1;
                }
        }

        ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t index, unsigned long req_count)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        if (blk_cgroup_congested())
                return;

        /* be dumb */
        if (filp && (filp->f_mode & FMODE_RANDOM)) {
                force_page_cache_readahead(mapping, filp, index, req_count);
                return;
        }

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, index, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t index,
                           unsigned long req_count)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * The same bit is used for PG_readahead and PG_reclaim, so a
         * page under writeback cannot be trusted as a readahead marker.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (inode_read_congested(mapping->host))
                return;

        if (blk_cgroup_congested())
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, index, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
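
/*
 * Sketch (a simplified view, not the exact generic_file_buffered_read()
 * code) of how a buffered read path is expected to drive the two helpers
 * above: sync readahead on a cache miss, async readahead when it touches
 * the PG_readahead marker.
 *
 *      page = find_get_page(mapping, index);
 *      if (!page) {
 *              page_cache_sync_readahead(mapping, ra, filp, index, nr);
 *              page = find_get_page(mapping, index);
 *      } else if (PageReadahead(page)) {
 *              page_cache_async_readahead(mapping, ra, filp, page, index, nr);
 *      }
 */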

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;

        /*
         * The readahead() syscall is intended to run only on files
         * that can execute readahead. If readahead is not possible
         * on this file, then we must return -EINVAL.
         */
        ret = -EINVAL;
        if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
            !S_ISREG(file_inode(f.file)->i_mode))
                goto out;

        ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
        fdput(f);
        return ret;
}

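/*
 * Userspace usage sketch (not kernel code): a reader that knows it will
 * need a region soon can ask for it up front and then read as usual.
 * The file name below is hypothetical.
 *
 *      int fd = open("data.bin", O_RDONLY);
 *
 *      // hint: populate the first 1 MiB of the page cache ahead of time
 *      readahead(fd, 0, 1024 * 1024);
 *
 *      // ... later reads of that region should hit the page cache
 *      pread(fd, buf, 4096, 0);
 */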
SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
        return ksys_readahead(fd, offset, count);
}