// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
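
/*
 * Note: the per-file window starts out at the backing device's ra_pages,
 * which typically defaults to 128KiB worth of pages (VM_READAHEAD_PAGES)
 * and can be tuned via the bdi's read_ahead_kb sysfs knob or adjusted per
 * file with fadvise().
 */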

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
        put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Return: %0 on success, or the error returned by @filler otherwise.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                readahead_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                put_page(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

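/*
 * Hand the pages described by @rac (and, for the legacy ->readpages()
 * path, the pages on @pages) to the filesystem.  ->readahead() is
 * preferred; the older ->readpages() interface is used if that is all
 * the filesystem provides, and ->readpage() is the final single-page
 * fallback.  Pages the filesystem did not start I/O on are cleaned up
 * here.
 */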
static void read_pages(struct readahead_control *rac, struct list_head *pages,
                bool skip_page)
{
        const struct address_space_operations *aops = rac->mapping->a_ops;
        struct page *page;
        struct blk_plug plug;

        if (!readahead_count(rac))
                goto out;

        blk_start_plug(&plug);

        if (aops->readahead) {
                aops->readahead(rac);
                /* Clean up the remaining pages */
                while ((page = readahead_page(rac))) {
                        unlock_page(page);
                        put_page(page);
                }
        } else if (aops->readpages) {
                aops->readpages(rac->file, rac->mapping, pages,
                                readahead_count(rac));
                /* Clean up the remaining pages */
                put_pages_list(pages);
                rac->_index += rac->_nr_pages;
                rac->_nr_pages = 0;
        } else {
                while ((page = readahead_page(rac))) {
                        aops->readpage(rac->file, page);
                        put_page(page);
                }
        }

        blk_finish_plug(&plug);

        BUG_ON(!list_empty(pages));
        BUG_ON(readahead_count(rac));

out:
        if (skip_page)
                rac->_index++;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: How many pages before the end of this window to place
 *   the PG_readahead marker that triggers the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
                unsigned long nr_to_read, unsigned long lookahead_size)
{
        struct address_space *mapping = ractl->mapping;
        unsigned long index = readahead_index(ractl);
        LIST_HEAD(page_pool);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
        unsigned long i;

        /*
         * Partway through the readahead operation, we will have added
         * locked pages to the page cache, but will not yet have submitted
         * them for I/O.  Adding another page may need to allocate memory,
         * which can trigger memory reclaim.  Telling the VM we're in
         * the middle of a filesystem operation will cause it to not
         * touch file-backed pages, preventing a deadlock.  Most (all?)
         * filesystems already specify __GFP_NOFS in their mapping's
         * gfp_mask, but let's be explicit here.
         */
        unsigned int nofs = memalloc_nofs_save();

        filemap_invalidate_lock_shared(mapping);
        /*
         * Preallocate as many pages as we will need.
         */
        for (i = 0; i < nr_to_read; i++) {
                struct page *page = xa_load(&mapping->i_pages, index + i);

                if (page && !xa_is_value(page)) {
                        /*
                         * Page already present?  Kick off the current batch
                         * of contiguous pages before continuing with the
                         * next batch.  This page may be the one we would
                         * have intended to mark as Readahead, but we don't
                         * have a stable reference to this page, and it's
                         * not worth getting one just for that.
                         */
                        read_pages(ractl, &page_pool, true);
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        break;
                if (mapping->a_ops->readpages) {
                        page->index = index + i;
                        list_add(&page->lru, &page_pool);
                } else if (add_to_page_cache_lru(page, mapping, index + i,
                                        gfp_mask) < 0) {
                        put_page(page);
                        read_pages(ractl, &page_pool, true);
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
                if (i == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ractl->_nr_pages++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        read_pages(ractl, &page_pool, false);
        filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
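
/*
 * Illustrative use (a sketch, not taken from any particular filesystem):
 * a filesystem that wants to read pages past i_size, e.g. to pull in a
 * whole compressed cluster, might do roughly:
 *
 *      DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *      page_cache_ra_unbounded(&ractl, nr_pages, 0);
 *
 * No i_size check is performed here, so the caller must ensure the range
 * makes sense for its on-disk layout.
 */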

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations were causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
                unsigned long nr_to_read, unsigned long lookahead_size)
{
        struct inode *inode = ractl->mapping->host;
        unsigned long index = readahead_index(ractl);
        loff_t isize = i_size_read(inode);
        pgoff_t end_index;      /* The last page we want to read */

        if (isize == 0)
                return;

        end_index = (isize - 1) >> PAGE_SHIFT;
        if (index > end_index)
                return;
        /* Don't read past the page containing the last byte of the file */
        if (nr_to_read > end_index - index)
                nr_to_read = end_index - index + 1;

        page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
                unsigned long nr_to_read)
{
        struct address_space *mapping = ractl->mapping;
        struct file_ra_state *ra = ractl->ra;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
                        !mapping->a_ops->readahead))
                return;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        index = readahead_index(ractl);
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                ractl->_index = index;
                do_page_cache_ra(ractl, this_chunk, 0);

                index += this_chunk;
                nr_to_read -= this_chunk;
        }
}
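
/*
 * With 4KiB pages each chunk above is at most 512 pages.  Note that
 * nr_to_read has already been clamped to the larger of the per-file
 * window and bdi->io_pages, so even a forced read is bounded.
 */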

/*
 * Set the initial readahead window size.  Round the request up to the
 * next power of two, then scale it: by 4x if it is tiny relative to the
 * maximum readahead (<= max/32), by 2x if it is moderate (<= max/4),
 * otherwise jump straight to the maximum.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}
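
/*
 * For example, with the common 32-page (128KiB) maximum: a 2-page request
 * starts with a 4-page window, a 4-page request with 8 pages, an 8-page
 * request with 16 pages, and anything larger than 8 pages goes straight
 * to the 32-page maximum.
 */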

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;

        if (cur < max / 16)
                return 4 * cur;
        if (cur <= max / 2)
                return 2 * cur;
        return max;
}
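
/*
 * For example, with a 32-page maximum a sequential stream ramps up
 * 4 -> 8 -> 16 -> 32 pages and then stays at the 32-page maximum.
 */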

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as the
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the maximum readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
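
/*
 * For example, with a 32-page maximum, a reader issuing sequential 4-page
 * reads from the start of a file gets windows of 8, 16, 32, 32, ... pages;
 * each new window is submitted as soon as the reader touches the
 * PG_readahead-marked page of the previous one.
 */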

/*
 * Count the contiguously cached pages from @index-1 back to @index-@max.
 * This count is a conservative estimate of
 *      - the length of the sequential read sequence, or
 *      - the thrashing threshold in memory tight systems.
 * For example, if pages @index-1 .. @index-4 are cached but @index-5 is
 * not, this returns 4.
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   pgoff_t index, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = page_cache_prev_miss(mapping, index - 1, max);
        rcu_read_unlock();

        return index - 1 - head;
}

/*
 * Page cache context based readahead.
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t index,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, index, max);

        /*
         * not enough history pages:
         * it could be a random read
         */
        if (size <= req_size)
                return 0;

        /*
         * starts from the beginning of the file:
         * a strong indication of a long-running stream (or a whole-file read)
         */
        if (size >= index)
                size *= 2;

        ra->start = index;
        ra->size = min(size + req_size, max);
        ra->async_size = 1;

        return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
                bool hit_readahead_marker, unsigned long req_size)
{
        struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
        struct file_ra_state *ra = ractl->ra;
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        unsigned long index = readahead_index(ractl);
        pgoff_t prev_index;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        if (req_size > max_pages && bdi->io_pages > max_pages)
                max_pages = min(req_size, bdi->io_pages);

        /*
         * start of file
         */
        if (!index)
                goto initial_readahead;

        /*
         * It's the expected callback index, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((index == (ra->start + ra->size - ra->async_size) ||
             index == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = page_cache_next_miss(ractl->mapping, index + 1,
                                max_pages);
                rcu_read_unlock();

                if (!start || start - index > max_pages)
                        return;

                ra->start = start;
                ra->size = start - index;       /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max_pages)
                goto initial_readahead;

        /*
         * sequential cache miss
         * trivial case: (index - prev_index) == 1
         * unaligned reads: (index - prev_index) == 0
         */
        prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (index - prev_index <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
        if (try_context_readahead(ractl->mapping, ra, index, req_size,
                        max_pages))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        do_page_cache_ra(ractl, req_size, 0);
        return;

initial_readahead:
        ra->start = index;
        ra->size = get_init_ra_size(req_size, max_pages);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         * Take care of maximum IO pages as above.
         */
        if (index == ra->start && ra->size == ra->async_size) {
                add_pages = get_next_ra_size(ra, max_pages);
                if (ra->size + add_pages <= max_pages) {
                        ra->async_size = add_pages;
                        ra->size += add_pages;
                } else {
                        ra->size = max_pages;
                        ra->async_size = max_pages >> 1;
                }
        }

        ractl->_index = ra->start;
        do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
                unsigned long req_count)
{
        bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

        /*
         * Even if read-ahead is disabled, issue this request as read-ahead
         * as we'll need it to satisfy the requested range. The forced
         * read-ahead will do the right thing and limit the read to just the
         * requested range, which we'll set to 1 page for this case.
         */
        if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
                if (!ractl->file)
                        return;
                req_count = 1;
                do_forced_ra = true;
        }

        /* be dumb */
        if (do_forced_ra) {
                force_page_cache_ra(ractl, req_count);
                return;
        }

        /* do read-ahead */
        ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
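
/*
 * Callers normally reach this via the page_cache_sync_readahead() wrapper
 * (see pagemap.h), which builds the readahead_control on the stack,
 * roughly:
 *
 *      DEFINE_READAHEAD(ractl, file, ra, mapping, index);
 *      page_cache_sync_ra(&ractl, req_count);
 */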

void page_cache_async_ra(struct readahead_control *ractl,
                struct page *page, unsigned long req_count)
{
        /* no read-ahead */
        if (!ractl->ra->ra_pages)
                return;

        /*
         * The same page flag bit is used for PG_readahead and PG_reclaim,
         * so on a page under writeback the bit means PG_reclaim, not a
         * readahead marker.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (inode_read_congested(ractl->mapping->host))
                return;

        if (blk_cgroup_congested())
                return;

        /* do read-ahead */
        ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (!f.file || !(f.file->f_mode & FMODE_READ))
                goto out;

        /*
         * The readahead() syscall is intended to run only on files
         * that can execute readahead. If readahead is not possible
         * on this file, then we must return -EINVAL.
         */
        ret = -EINVAL;
        if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
            !S_ISREG(file_inode(f.file)->i_mode))
                goto out;

        ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
        fdput(f);
        return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
        return ksys_readahead(fd, offset, count);
}
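
/*
 * Example (userspace, illustrative): hint that the first 2MiB of a file
 * will be needed soon:
 *
 *      int fd = open("data.bin", O_RDONLY);
 *      readahead(fd, 0, 2 * 1024 * 1024);
 *
 * The hint is advisory; as above, it is implemented via
 * vfs_fadvise(..., POSIX_FADV_WILLNEED).
 */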

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * so that it covers the new range.  This may involve the insertion of THPs,
 * in which case the window may get expanded even beyond what was requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len)
{
        struct address_space *mapping = ractl->mapping;
        struct file_ra_state *ra = ractl->ra;
        pgoff_t new_index, new_nr_pages;
        gfp_t gfp_mask = readahead_gfp_mask(mapping);

        new_index = new_start / PAGE_SIZE;

        /* Expand the leading edge downwards */
        while (ractl->_index > new_index) {
                unsigned long index = ractl->_index - 1;
                struct page *page = xa_load(&mapping->i_pages, index);

                if (page && !xa_is_value(page))
                        return; /* Page apparently present */

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return;
                if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
                        put_page(page);
                        return;
                }

                ractl->_nr_pages++;
                ractl->_index = page->index;
        }

        new_len += new_start - readahead_pos(ractl);
        new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

        /* Expand the trailing edge upwards */
        while (ractl->_nr_pages < new_nr_pages) {
                unsigned long index = ractl->_index + ractl->_nr_pages;
                struct page *page = xa_load(&mapping->i_pages, index);

                if (page && !xa_is_value(page))
                        return; /* Page apparently present */

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return;
                if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
                        put_page(page);
                        return;
                }
                ractl->_nr_pages++;
                if (ra) {
                        ra->size++;
                        ra->async_size++;
                }
        }
}
EXPORT_SYMBOL(readahead_expand);

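/*
 * Illustrative use (a sketch): a caching layer that stores data in, say,
 * 256KiB granules might round the request out to granule boundaries so
 * that whole granules are read:
 *
 *      loff_t start = round_down(readahead_pos(ractl), SZ_256K);
 *      size_t len = round_up(readahead_pos(ractl) + readahead_length(ractl),
 *                            SZ_256K) - start;
 *      readahead_expand(ractl, start, len);
 */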