linux/drivers/staging/lustre/lustre/llite/rw.c
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lustre/llite/rw.c
  37 *
  38 * Lustre Lite I/O page cache routines shared by different kernel revs
  39 */
  40
  41#include <linux/kernel.h>
  42#include <linux/mm.h>
  43#include <linux/string.h>
  44#include <linux/stat.h>
  45#include <linux/errno.h>
  46#include <linux/unistd.h>
  47#include <linux/writeback.h>
  48#include <linux/uaccess.h>
  49
  50#include <linux/fs.h>
  51#include <linux/pagemap.h>
  52/* current_is_kswapd() */
  53#include <linux/swap.h>
  54
  55#define DEBUG_SUBSYSTEM S_LLITE
  56
  57#include "../include/lustre_lite.h"
  58#include "../include/obd_cksum.h"
  59#include "llite_internal.h"
  60#include "../include/linux/lustre_compat25.h"
  61
  62/**
  63 * Finalizes cl-data before exiting typical address_space operation. Dual to
  64 * ll_cl_init().
  65 */
  66static void ll_cl_fini(struct ll_cl_context *lcc)
  67{
  68        struct lu_env  *env  = lcc->lcc_env;
  69        struct cl_io   *io   = lcc->lcc_io;
  70        struct cl_page *page = lcc->lcc_page;
  71
  72        LASSERT(lcc->lcc_cookie == current);
  73        LASSERT(env != NULL);
  74
  75        if (page != NULL) {
  76                lu_ref_del(&page->cp_reference, "cl_io", io);
  77                cl_page_put(env, page);
  78        }
  79
  80        cl_env_put(env, &lcc->lcc_refcheck);
  81}
  82
  83/**
  84 * Initializes common cl-data at the typical address_space operation entry
  85 * point.
  86 */
  87static struct ll_cl_context *ll_cl_init(struct file *file,
  88                                        struct page *vmpage, int create)
  89{
  90        struct ll_cl_context *lcc;
  91        struct lu_env    *env;
  92        struct cl_io     *io;
  93        struct cl_object *clob;
  94        struct ccc_io    *cio;
  95
  96        int refcheck;
  97        int result = 0;
  98
  99        clob = ll_i2info(vmpage->mapping->host)->lli_clob;
 100        LASSERT(clob != NULL);
 101
 102        env = cl_env_get(&refcheck);
 103        if (IS_ERR(env))
 104                return ERR_CAST(env);
 105
 106        lcc = &vvp_env_info(env)->vti_io_ctx;
 107        memset(lcc, 0, sizeof(*lcc));
 108        lcc->lcc_env = env;
 109        lcc->lcc_refcheck = refcheck;
 110        lcc->lcc_cookie = current;
 111
 112        cio = ccc_env_io(env);
 113        io = cio->cui_cl.cis_io;
 114        if (io == NULL && create) {
 115                struct inode *inode = vmpage->mapping->host;
 116                loff_t pos;
 117
 118                if (mutex_trylock(&inode->i_mutex)) {
 119                        mutex_unlock(&(inode)->i_mutex);
 120
 121                        /* This is bad: someone is trying to write the
 122                         * page without holding the inode mutex, so dirty
 123                         * pages can be added to the cache during truncate. */
 124                        CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
 125                               current->comm);
 126                        dump_stack();
 127                        LBUG();
 128                        return ERR_PTR(-EIO);
 129                }
 130
 131                /*
 132                 * The loop-back driver calls the ->prepare_write()
 133                 * method directly, bypassing the file system ->write()
 134                 * operation, so the cl_io has to be created here.
 135                 */
 136                io = ccc_env_thread_io(env);
 137                ll_io_init(io, file, 1);
 138
 139                /* No lock at all for this kind of IO - we can't take one
 140                 * because we already hold the page lock, and doing so
 141                 * would cause a deadlock.
 142                 * XXX: This gives poor performance for the loop device -
 143                 *      one page per RPC. For better performance, users
 144                 *      should use the lloop driver instead.
 145                 */
 146                io->ci_lockreq = CILR_NEVER;
 147
 148                pos = vmpage->index << PAGE_CACHE_SHIFT;
 149
 150                /* Create a temp IO to serve write. */
 151                result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
 152                if (result == 0) {
 153                        cio->cui_fd = LUSTRE_FPRIVATE(file);
 154                        cio->cui_iter = NULL;
 155                        result = cl_io_iter_init(env, io);
 156                        if (result == 0) {
 157                                result = cl_io_lock(env, io);
 158                                if (result == 0)
 159                                        result = cl_io_start(env, io);
 160                        }
 161                } else
 162                        result = io->ci_result;
 163        }
 164
 165        lcc->lcc_io = io;
 166        if (io == NULL)
 167                result = -EIO;
 168        if (result == 0) {
 169                struct cl_page   *page;
 170
 171                LASSERT(io != NULL);
 172                LASSERT(io->ci_state == CIS_IO_GOING);
 173                LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
 174                page = cl_page_find(env, clob, vmpage->index, vmpage,
 175                                    CPT_CACHEABLE);
 176                if (!IS_ERR(page)) {
 177                        lcc->lcc_page = page;
 178                        lu_ref_add(&page->cp_reference, "cl_io", io);
 179                        result = 0;
 180                } else
 181                        result = PTR_ERR(page);
 182        }
 183        if (result) {
 184                ll_cl_fini(lcc);
 185                lcc = ERR_PTR(result);
 186        }
 187
 188        CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
 189               vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
 190               env, io);
 191        return lcc;
 192}
 193
 194static struct ll_cl_context *ll_cl_get(void)
 195{
 196        struct ll_cl_context *lcc;
 197        struct lu_env *env;
 198        int refcheck;
 199
 200        env = cl_env_get(&refcheck);
 201        LASSERT(!IS_ERR(env));
 202        lcc = &vvp_env_info(env)->vti_io_ctx;
 203        LASSERT(env == lcc->lcc_env);
 204        LASSERT(current == lcc->lcc_cookie);
 205        cl_env_put(env, &refcheck);
 206
 207        /* env was obtained in ll_cl_init(), so it is still usable. */
 208        return lcc;
 209}
 210
 211/**
 212 * ->prepare_write() address space operation called by generic_file_write()
 213 * for every page during write.
 214 */
 215int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
 216                     unsigned to)
 217{
 218        struct ll_cl_context *lcc;
 219        int result;
 220
 221        lcc = ll_cl_init(file, vmpage, 1);
 222        if (!IS_ERR(lcc)) {
 223                struct lu_env  *env = lcc->lcc_env;
 224                struct cl_io   *io  = lcc->lcc_io;
 225                struct cl_page *page = lcc->lcc_page;
 226
 227                cl_page_assume(env, io, page);
 228
 229                result = cl_io_prepare_write(env, io, page, from, to);
 230                if (result == 0) {
 231                        /*
 232                         * Add a reference, so that page is not evicted from
 233                         * the cache until ->commit_write() is called.
 234                         */
 235                        cl_page_get(page);
 236                        lu_ref_add(&page->cp_reference, "prepare_write",
 237                                   current);
 238                } else {
 239                        cl_page_unassume(env, io, page);
 240                        ll_cl_fini(lcc);
 241                }
 242                /* returning 0 in prepare assumes commit must be called
 243                 * afterwards */
 244        } else {
 245                result = PTR_ERR(lcc);
 246        }
 247        return result;
 248}
 249
 250int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
 251                    unsigned to)
 252{
 253        struct ll_cl_context *lcc;
 254        struct lu_env    *env;
 255        struct cl_io     *io;
 256        struct cl_page   *page;
 257        int result = 0;
 258
 259        lcc  = ll_cl_get();
 260        env  = lcc->lcc_env;
 261        page = lcc->lcc_page;
 262        io   = lcc->lcc_io;
 263
 264        LASSERT(cl_page_is_owned(page, io));
 265        LASSERT(from <= to);
 266        if (from != to) /* handle short write case. */
 267                result = cl_io_commit_write(env, io, page, from, to);
 268        if (cl_page_is_owned(page, io))
 269                cl_page_unassume(env, io, page);
 270
 271        /*
 272         * Release reference acquired by ll_prepare_write().
 273         */
 274        lu_ref_del(&page->cp_reference, "prepare_write", current);
 275        cl_page_put(env, page);
 276        ll_cl_fini(lcc);
 277        return result;
 278}
 279
 280struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
 281{
 282        __u64 opc;
 283
 284        opc = crt == CRT_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
 285        return ll_osscapa_get(inode, opc);
 286}
 287
 288static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
 289
 290/**
 291 * Get readahead pages from the filesystem readahead pool of the client for a
 292 * thread.
 293 *
 294 * \param sbi superblock for filesystem readahead state ll_ra_info
 295 * \param ria per-thread readahead state
 296 * \param pages number of pages requested for readahead for the thread.
 297 *
 298 * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
 299 * It should work well if ra_max_pages is much greater than a single
 300 * file's read-ahead window, and there are not too many threads contending
 301 * for these readahead pages.
 302 *
 303 * TODO: There may be a 'global sync problem' if many threads are trying
 304 * to get an ra budget that is larger than the remaining readahead pages
 305 * and reach here at exactly the same time. They will compute \a ret to
 306 * consume the remaining pages, but will fail at atomic_add_return() and
 307 * get a zero ra window, although there is still ra space remaining. - Jay */
 308
 309static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
 310                                     struct ra_io_arg *ria,
 311                                     unsigned long pages)
 312{
 313        struct ll_ra_info *ra = &sbi->ll_ra_info;
 314        long ret;
 315
 316        /* If fewer than 1MB worth of read-ahead pages are left, do not do
 317         * read-ahead, otherwise it will form small read RPCs (< 1MB), which
 318         * hurt server performance a lot. */
 319        ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
 320        if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
 321                ret = 0;
 322                goto out;
 323        }
 324
 325        /* If the non-strided (ria_pages == 0) readahead window
 326         * (ria_start + ret) has grown across an RPC boundary, then trim
 327         * readahead size by the amount beyond the RPC so it ends on an
 328         * RPC boundary. If the readahead window is already ending on
 329         * an RPC boundary (beyond_rpc == 0), or smaller than a full
 330         * RPC (beyond_rpc < ret) the readahead size is unchanged.
 331         * The (beyond_rpc != 0) check is skipped since the conditional
 332         * branch is more expensive than subtracting zero from the result.
 333         *
 334         * Strided read is left unaligned to avoid small fragments beyond
 335         * the RPC boundary from needing an extra read RPC. */
 336        if (ria->ria_pages == 0) {
 337                long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
 338                if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
 339                        ret -= beyond_rpc;
 340        }
 341
 342        if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
 343                atomic_sub(ret, &ra->ra_cur_pages);
 344                ret = 0;
 345        }
 346
 347out:
 348        return ret;
 349}
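/*
 * Illustrative worked example for the budget and trimming logic above
 * (hypothetical values, assuming 4KB pages and PTLRPC_MAX_BRW_PAGES == 256,
 * i.e. a 1MB RPC):
 *
 *   ra_max_pages = 10240, ra_cur_pages = 10100, pages = 512:
 *     ret = min(10240 - 10100, 512) = 140, which is below the 256-page
 *     minimum, so the caller gets no readahead budget at all rather than
 *     issuing a sub-RPC-sized read.
 *
 *   ra_max_pages = 10240, ra_cur_pages = 9000, pages = 512, ria_start = 300
 *   (non-strided):
 *     ret = 512; (300 + 512) % 256 = 44 pages spill past an RPC boundary,
 *     so ret is trimmed to 468 and the window ends at page 768, an exact
 *     multiple of the RPC size.
 */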
 350
 351void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
 352{
 353        struct ll_ra_info *ra = &sbi->ll_ra_info;
 354        atomic_sub(len, &ra->ra_cur_pages);
 355}
 356
 357static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
 358{
 359        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
 360        lprocfs_counter_incr(sbi->ll_ra_stats, which);
 361}
 362
 363void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
 364{
 365        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
 366        ll_ra_stats_inc_sbi(sbi, which);
 367}
 368
 369#define RAS_CDEBUG(ras) \
 370        CDEBUG(D_READA,                                               \
 371               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu"    \
 372               "csr %lu sf %lu sp %lu sl %lu \n",                           \
 373               ras->ras_last_readpage, ras->ras_consecutive_requests,   \
 374               ras->ras_consecutive_pages, ras->ras_window_start,           \
 375               ras->ras_window_len, ras->ras_next_readahead,             \
 376               ras->ras_requests, ras->ras_request_index,                   \
 377               ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
 378               ras->ras_stride_pages, ras->ras_stride_length)
 379
 380static int index_in_window(unsigned long index, unsigned long point,
 381                           unsigned long before, unsigned long after)
 382{
 383        unsigned long start = point - before, end = point + after;
 384
 385        if (start > point)
 386               start = 0;
 387        if (end < point)
 388               end = ~0;
 389
 390        return start <= index && index <= end;
 391}
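/*
 * For example (hypothetical values): point = 100, before = 8, after = 8
 * accepts indexes in [92, 108].  With point = 3, before = 8 the unsigned
 * subtraction would wrap, which the "start > point" check catches and
 * clamps to 0, giving [0, 11].
 */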
 392
 393static struct ll_readahead_state *ll_ras_get(struct file *f)
 394{
 395        struct ll_file_data       *fd;
 396
 397        fd = LUSTRE_FPRIVATE(f);
 398        return &fd->fd_ras;
 399}
 400
 401void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
 402{
 403        struct ll_readahead_state *ras;
 404
 405        ras = ll_ras_get(f);
 406
 407        spin_lock(&ras->ras_lock);
 408        ras->ras_requests++;
 409        ras->ras_request_index = 0;
 410        ras->ras_consecutive_requests++;
 411        rar->lrr_reader = current;
 412
 413        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
 414        spin_unlock(&ras->ras_lock);
 415}
 416
 417void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
 418{
 419        struct ll_readahead_state *ras;
 420
 421        ras = ll_ras_get(f);
 422
 423        spin_lock(&ras->ras_lock);
 424        list_del_init(&rar->lrr_linkage);
 425        spin_unlock(&ras->ras_lock);
 426}
 427
 428static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
 429{
 430        struct ll_ra_read *scan;
 431
 432        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
 433                if (scan->lrr_reader == current)
 434                        return scan;
 435        }
 436        return NULL;
 437}
 438
 439struct ll_ra_read *ll_ra_read_get(struct file *f)
 440{
 441        struct ll_readahead_state *ras;
 442        struct ll_ra_read        *bead;
 443
 444        ras = ll_ras_get(f);
 445
 446        spin_lock(&ras->ras_lock);
 447        bead = ll_ra_read_get_locked(ras);
 448        spin_unlock(&ras->ras_lock);
 449        return bead;
 450}
 451
 452static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 453                              struct cl_page_list *queue, struct cl_page *page,
 454                              struct page *vmpage)
 455{
 456        struct ccc_page *cp;
 457        int           rc;
 458
 459        rc = 0;
 460        cl_page_assume(env, io, page);
 461        lu_ref_add(&page->cp_reference, "ra", current);
 462        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
 463        if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
 464                rc = cl_page_is_under_lock(env, io, page);
 465                if (rc == -EBUSY) {
 466                        cp->cpg_defer_uptodate = 1;
 467                        cp->cpg_ra_used = 0;
 468                        cl_page_list_add(queue, page);
 469                        rc = 1;
 470                } else {
 471                        cl_page_delete(env, page);
 472                        rc = -ENOLCK;
 473                }
 474        } else {
 475                /* skip completed pages */
 476                cl_page_unassume(env, io, page);
 477        }
 478        lu_ref_del(&page->cp_reference, "ra", current);
 479        cl_page_put(env, page);
 480        return rc;
 481}
 482
 483/**
 484 * Initiates read-ahead of a page with given index.
 485 *
 486 * \retval     +ve: page was added to \a queue.
 487 *
 488 * \retval -ENOLCK: there is no extent lock for this part of a file, stop
 489 *                read-ahead.
 490 *
 491 * \retval  -ve, 0: page wasn't added to \a queue for other reason.
 492 */
 493static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 494                              struct cl_page_list *queue,
 495                              pgoff_t index, struct address_space *mapping)
 496{
 497        struct page      *vmpage;
 498        struct cl_object *clob  = ll_i2info(mapping->host)->lli_clob;
 499        struct cl_page   *page;
 500        enum ra_stat      which = _NR_RA_STAT; /* keep gcc happy */
 501        int            rc    = 0;
 502        const char       *msg   = NULL;
 503
 504        vmpage = grab_cache_page_nowait(mapping, index);
 505        if (vmpage != NULL) {
 506                /* Check if vmpage was truncated or reclaimed */
 507                if (vmpage->mapping == mapping) {
 508                        page = cl_page_find(env, clob, vmpage->index,
 509                                            vmpage, CPT_CACHEABLE);
 510                        if (!IS_ERR(page)) {
 511                                rc = cl_read_ahead_page(env, io, queue,
 512                                                        page, vmpage);
 513                                if (rc == -ENOLCK) {
 514                                        which = RA_STAT_FAILED_MATCH;
 515                                        msg   = "lock match failed";
 516                                }
 517                        } else {
 518                                which = RA_STAT_FAILED_GRAB_PAGE;
 519                                msg   = "cl_page_find failed";
 520                        }
 521                } else {
 522                        which = RA_STAT_WRONG_GRAB_PAGE;
 523                        msg   = "g_c_p_n returned invalid page";
 524                }
 525                if (rc != 1)
 526                        unlock_page(vmpage);
 527                page_cache_release(vmpage);
 528        } else {
 529                which = RA_STAT_FAILED_GRAB_PAGE;
 530                msg   = "g_c_p_n failed";
 531        }
 532        if (msg != NULL) {
 533                ll_ra_stats_inc(mapping, which);
 534                CDEBUG(D_READA, "%s\n", msg);
 535        }
 536        return rc;
 537}
 538
 539#define RIA_DEBUG(ria)                                                 \
 540        CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n",       \
 541        ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
 542        ria->ria_pages)
 543
 544/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
 545 * know what the actual RPC size is.  If this needs to change, it makes more
 546 * sense to tune the i_blkbits value for the file based on the OSTs it is
 547 * striped over, rather than having a constant value for all files here. */
 548
 549/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
 550 * Temporarily set RAS_INCREASE_STEP to 1MB. Once the 4MB RPC size is enabled
 551 * by default, this should be adjusted to correspond with max_read_ahead_mb
 552 * and max_read_ahead_per_file_mb; otherwise the readahead budget can be used
 553 * up quickly, which will affect read performance significantly. See LU-2816 */
 554#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
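/*
 * For example, assuming ONE_MB_BRW_SIZE is 1MB and 4KB pages
 * (PAGE_CACHE_SHIFT == 12), RAS_INCREASE_STEP() evaluates to 256 pages,
 * so each window increase adds 1MB worth of readahead.
 */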
 555
 556static inline int stride_io_mode(struct ll_readahead_state *ras)
 557{
 558        return ras->ras_consecutive_stride_requests > 1;
 559}
 560/* The function calculates how many pages will be read in
 561 * [off, off + length] within a stride IO area where
 562 * stride_offset = st_off, stride_length = st_len,
 563 * stride_pages = st_pgs
 564 *
 565 *   |------------------|*****|------------------|*****|------------|*****|....
 566 * st_off
 567 *   |--- st_pgs     ---|
 568 *   |-----     st_len   -----|
 569 *
 570 *            How many pages it should read in such pattern
 571 *            |-------------------------------------------------------------|
 572 *            off
 573 *            |<------            length                      ------->|
 574 *
 575 *        =   |<----->|  +  |-------------------------------------| +   |---|
 576 *           start_left          st_pgs * i                 end_left
 577 */
 578static unsigned long
 579stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
 580                unsigned long off, unsigned long length)
 581{
 582        __u64 start = off > st_off ? off - st_off : 0;
 583        __u64 end = off + length > st_off ? off + length - st_off : 0;
 584        unsigned long start_left = 0;
 585        unsigned long end_left = 0;
 586        unsigned long pg_count;
 587
 588        if (st_len == 0 || length == 0 || end == 0)
 589                return length;
 590
 591        start_left = do_div(start, st_len);
 592        if (start_left < st_pgs)
 593                start_left = st_pgs - start_left;
 594        else
 595                start_left = 0;
 596
 597        end_left = do_div(end, st_len);
 598        if (end_left > st_pgs)
 599                end_left = st_pgs;
 600
 601        CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n",
 602               start, end, start_left, end_left);
 603
 604        if (start == end)
 605                pg_count = end_left - (st_pgs - start_left);
 606        else
 607                pg_count = start_left + st_pgs * (end - start - 1) + end_left;
 608
 609        CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
 610               st_off, st_len, st_pgs, off, length, pg_count);
 611
 612        return pg_count;
 613}
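/*
 * Illustrative worked example (hypothetical values): st_off = 0, st_len = 16,
 * st_pgs = 4, off = 2, length = 30:
 *   start = 2, end = 32
 *   start_left = 4 - (2 % 16) = 2   (pages left in the first data chunk)
 *   end_left   = 32 % 16 = 0        (no partial data chunk at the end)
 *   start / st_len = 0, end / st_len = 2, so
 *   pg_count = 2 + 4 * (2 - 0 - 1) + 0 = 6
 * i.e. pages 2-3 of the first chunk plus the full chunk at pages 16-19.
 */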
 614
 615static int ria_page_count(struct ra_io_arg *ria)
 616{
 617        __u64 length = ria->ria_end >= ria->ria_start ?
 618                       ria->ria_end - ria->ria_start + 1 : 0;
 619
 620        return stride_pg_count(ria->ria_stoff, ria->ria_length,
 621                               ria->ria_pages, ria->ria_start,
 622                               length);
 623}
 624
 625/* Check whether the index is in the defined read-ahead window */
 626static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
 627{
 628        /* If ria_length == ria_pages, it means non-stride I/O mode;
 629         * idx should always be inside the read-ahead window in this case.
 630         * For stride I/O mode, just check whether the idx is inside
 631         * the ria_pages. */
 632        return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
 633               (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
 634                ria->ria_length < ria->ria_pages);
 635}
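/*
 * For example (hypothetical values), with ria_stoff = 100, ria_length = 16
 * and ria_pages = 4 the window covers indexes 100-103, 116-119, 132-135, ...
 * so idx = 118 ((118 - 100) % 16 = 2 < 4) is inside, while idx = 110
 * ((110 - 100) % 16 = 10) falls in the stride gap and is outside.
 */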
 636
 637static int ll_read_ahead_pages(const struct lu_env *env,
 638                               struct cl_io *io, struct cl_page_list *queue,
 639                               struct ra_io_arg *ria,
 640                               unsigned long *reserved_pages,
 641                               struct address_space *mapping,
 642                               unsigned long *ra_end)
 643{
 644        int rc, count = 0, stride_ria;
 645        unsigned long page_idx;
 646
 647        LASSERT(ria != NULL);
 648        RIA_DEBUG(ria);
 649
 650        stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
 651        for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
 652                        *reserved_pages > 0; page_idx++) {
 653                if (ras_inside_ra_window(page_idx, ria)) {
 654                        /* If the page is inside the read-ahead window*/
 655                        rc = ll_read_ahead_page(env, io, queue,
 656                                                page_idx, mapping);
 657                        if (rc == 1) {
 658                                (*reserved_pages)--;
 659                                count++;
 660                        } else if (rc == -ENOLCK)
 661                                break;
 662                } else if (stride_ria) {
 663                        /* If it is not in the read-ahead window but stride
 664                         * I/O mode is active, check whether the stride gap
 665                         * should be skipped. */
 666                        pgoff_t offset;
 667                        /* FIXME: This assertion is only valid for forward
 668                         * read-ahead; it will need fixing when backward
 669                         * read-ahead is implemented. */
 670                        LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
 671                                 page_idx,
 672                                 ria->ria_start, ria->ria_end, ria->ria_stoff,
 673                                 ria->ria_length, ria->ria_pages);
 674                        offset = page_idx - ria->ria_stoff;
 675                        offset = offset % (ria->ria_length);
 676                        if (offset > ria->ria_pages) {
 677                                page_idx += ria->ria_length - offset;
 678                                CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
 679                                       ria->ria_length - offset);
 680                                continue;
 681                        }
 682                }
 683        }
 684        *ra_end = page_idx;
 685        return count;
 686}
 687
 688int ll_readahead(const struct lu_env *env, struct cl_io *io,
 689                 struct ll_readahead_state *ras, struct address_space *mapping,
 690                 struct cl_page_list *queue, int flags)
 691{
 692        struct vvp_io *vio = vvp_env_io(env);
 693        struct vvp_thread_info *vti = vvp_env_info(env);
 694        struct cl_attr *attr = ccc_env_thread_attr(env);
 695        unsigned long start = 0, end = 0, reserved;
 696        unsigned long ra_end, len;
 697        struct inode *inode;
 698        struct ll_ra_read *bead;
 699        struct ra_io_arg *ria = &vti->vti_ria;
 700        struct ll_inode_info *lli;
 701        struct cl_object *clob;
 702        int ret = 0;
 703        __u64 kms;
 704
 705        inode = mapping->host;
 706        lli = ll_i2info(inode);
 707        clob = lli->lli_clob;
 708
 709        memset(ria, 0, sizeof(*ria));
 710
 711        cl_object_attr_lock(clob);
 712        ret = cl_object_attr_get(env, clob, attr);
 713        cl_object_attr_unlock(clob);
 714
 715        if (ret != 0)
 716                return ret;
 717        kms = attr->cat_kms;
 718        if (kms == 0) {
 719                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
 720                return 0;
 721        }
 722
 723        spin_lock(&ras->ras_lock);
 724        if (vio->cui_ra_window_set)
 725                bead = &vio->cui_bead;
 726        else
 727                bead = NULL;
 728
 729        /* Enlarge the RA window to encompass the full read */
 730        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
 731            bead->lrr_start + bead->lrr_count) {
 732                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
 733                                      ras->ras_window_start;
 734        }
 735        /* Reserve a part of the read-ahead window that we'll be issuing */
 736        if (ras->ras_window_len) {
 737                start = ras->ras_next_readahead;
 738                end = ras->ras_window_start + ras->ras_window_len - 1;
 739        }
 740        if (end != 0) {
 741                unsigned long rpc_boundary;
 742                /*
 743                 * Align RA window to an optimal boundary.
 744                 *
 745                 * XXX This would be better to align to cl_max_pages_per_rpc
 746                 * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
 747                 * be aligned to the RAID stripe size in the future and that
 748                 * is more important than the RPC size.
 749                 */
 750                /* Note: we only trim the RPC, instead of extending it
 751                 * to the boundary, to avoid reading too many pages during
 752                 * random reads. */
 753                rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
 754                if (rpc_boundary > 0)
 755                        rpc_boundary--;
 756
 757                if (rpc_boundary  > start)
 758                        end = rpc_boundary;
 759
 760                /* Truncate RA window to end of file */
 761                end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
 762
 763                ras->ras_next_readahead = max(end, end + 1);
 764                RAS_CDEBUG(ras);
 765        }
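        /*
         * Illustrative example of the trimming above (assuming
         * PTLRPC_MAX_BRW_PAGES == 256): with start = 300 and a tentative
         * end = 700, rpc_boundary becomes ((700 + 1) & ~255) - 1 = 511,
         * which is > start, so the window is trimmed to end at page 511,
         * the last page of an RPC-sized chunk.  With end = 310 the boundary
         * (255) is not past start, so the window is left untrimmed.  Either
         * way end is then clamped to the last page covered by kms.
         */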
 766        ria->ria_start = start;
 767        ria->ria_end = end;
 768        /* If stride I/O mode is detected, get stride window */
 769        if (stride_io_mode(ras)) {
 770                ria->ria_stoff = ras->ras_stride_offset;
 771                ria->ria_length = ras->ras_stride_length;
 772                ria->ria_pages = ras->ras_stride_pages;
 773        }
 774        spin_unlock(&ras->ras_lock);
 775
 776        if (end == 0) {
 777                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
 778                return 0;
 779        }
 780        len = ria_page_count(ria);
 781        if (len == 0)
 782                return 0;
 783
 784        reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
 785        if (reserved < len)
 786                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
 787
 788        CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
 789               atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
 790               ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
 791
 792        ret = ll_read_ahead_pages(env, io, queue,
 793                                  ria, &reserved, mapping, &ra_end);
 794
 795        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
 796        if (reserved != 0)
 797                ll_ra_count_put(ll_i2sbi(inode), reserved);
 798
 799        if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
 800                ll_ra_stats_inc(mapping, RA_STAT_EOF);
 801
 802        /* If we didn't get to the end of the region we reserved from
 803         * the ras, we need to go back and update the ras so that the
 804         * next read-ahead tries from where we left off.  We only do so
 805         * if the region we failed to issue read-ahead on is still ahead
 806         * of the app and behind the next index to start read-ahead from. */
 807        CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
 808               ra_end, end, ria->ria_end);
 809
 810        if (ra_end != end + 1) {
 811                spin_lock(&ras->ras_lock);
 812                if (ra_end < ras->ras_next_readahead &&
 813                    index_in_window(ra_end, ras->ras_window_start, 0,
 814                                    ras->ras_window_len)) {
 815                        ras->ras_next_readahead = ra_end;
 816                        RAS_CDEBUG(ras);
 817                }
 818                spin_unlock(&ras->ras_lock);
 819        }
 820
 821        return ret;
 822}
 823
 824static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
 825                          unsigned long index)
 826{
 827        ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
 828}
 829
 830/* called with the ras_lock held or from places where it doesn't matter */
 831static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
 832                      unsigned long index)
 833{
 834        ras->ras_last_readpage = index;
 835        ras->ras_consecutive_requests = 0;
 836        ras->ras_consecutive_pages = 0;
 837        ras->ras_window_len = 0;
 838        ras_set_start(inode, ras, index);
 839        ras->ras_next_readahead = max(ras->ras_window_start, index);
 840
 841        RAS_CDEBUG(ras);
 842}
 843
 844/* called with the ras_lock held or from places where it doesn't matter */
 845static void ras_stride_reset(struct ll_readahead_state *ras)
 846{
 847        ras->ras_consecutive_stride_requests = 0;
 848        ras->ras_stride_length = 0;
 849        ras->ras_stride_pages = 0;
 850        RAS_CDEBUG(ras);
 851}
 852
 853void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
 854{
 855        spin_lock_init(&ras->ras_lock);
 856        ras_reset(inode, ras, 0);
 857        ras->ras_requests = 0;
 858        INIT_LIST_HEAD(&ras->ras_read_beads);
 859}
 860
 861/*
 862 * Check whether the read request is in the stride window.
 863 * If it is in the stride window, return 1, otherwise return 0.
 864 */
 865static int index_in_stride_window(struct ll_readahead_state *ras,
 866                                  unsigned long index)
 867{
 868        unsigned long stride_gap;
 869
 870        if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
 871            ras->ras_stride_pages == ras->ras_stride_length)
 872                return 0;
 873
 874        stride_gap = index - ras->ras_last_readpage - 1;
 875
 876        /* If it is contiguous read */
 877        if (stride_gap == 0)
 878                return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
 879
 880        /* Otherwise check the stride by itself */
 881        return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
 882                ras->ras_consecutive_pages == ras->ras_stride_pages;
 883}
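/*
 * For example (hypothetical state): ras_stride_length = 16,
 * ras_stride_pages = 4, ras_last_readpage = 103, ras_consecutive_pages = 4.
 * A read at index 116 gives stride_gap = 12 = 16 - 4 with a full chunk of
 * consecutive pages behind it, so it matches the stride pattern.  A
 * contiguous read at index 104 would make 5 consecutive pages, more than
 * the 4-page stride chunk, so it falls outside the stride window.
 */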
 884
 885static void ras_update_stride_detector(struct ll_readahead_state *ras,
 886                                       unsigned long index)
 887{
 888        unsigned long stride_gap = index - ras->ras_last_readpage - 1;
 889
 890        if (!stride_io_mode(ras) && (stride_gap != 0 ||
 891             ras->ras_consecutive_stride_requests == 0)) {
 892                ras->ras_stride_pages = ras->ras_consecutive_pages;
 893                ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
 894        }
 895        LASSERT(ras->ras_request_index == 0);
 896        LASSERT(ras->ras_consecutive_stride_requests == 0);
 897
 898        if (index <= ras->ras_last_readpage) {
 899                /* Reset stride window for forward read */
 900                ras_stride_reset(ras);
 901                return;
 902        }
 903
 904        ras->ras_stride_pages = ras->ras_consecutive_pages;
 905        ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
 906
 907        RAS_CDEBUG(ras);
 908        return;
 909}
 910
 911static unsigned long
 912stride_page_count(struct ll_readahead_state *ras, unsigned long len)
 913{
 914        return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
 915                               ras->ras_stride_pages, ras->ras_stride_offset,
 916                               len);
 917}
 918
 919/* The stride read-ahead window will be increased by inc_len according to
 920 * the stride I/O pattern */
 921static void ras_stride_increase_window(struct ll_readahead_state *ras,
 922                                       struct ll_ra_info *ra,
 923                                       unsigned long inc_len)
 924{
 925        unsigned long left, step, window_len;
 926        unsigned long stride_len;
 927
 928        LASSERT(ras->ras_stride_length > 0);
 929        LASSERTF(ras->ras_window_start + ras->ras_window_len
 930                 >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
 931                 ras->ras_window_start,
 932                 ras->ras_window_len, ras->ras_stride_offset);
 933
 934        stride_len = ras->ras_window_start + ras->ras_window_len -
 935                     ras->ras_stride_offset;
 936
 937        left = stride_len % ras->ras_stride_length;
 938        window_len = ras->ras_window_len - left;
 939
 940        if (left < ras->ras_stride_pages)
 941                left += inc_len;
 942        else
 943                left = ras->ras_stride_pages + inc_len;
 944
 945        LASSERT(ras->ras_stride_pages != 0);
 946
 947        step = left / ras->ras_stride_pages;
 948        left %= ras->ras_stride_pages;
 949
 950        window_len += step * ras->ras_stride_length + left;
 951
 952        if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
 953                ras->ras_window_len = window_len;
 954
 955        RAS_CDEBUG(ras);
 956}
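/*
 * Illustrative worked example (hypothetical state): ras_stride_offset = 0,
 * ras_stride_length = 16, ras_stride_pages = 4, ras_window_start = 0,
 * ras_window_len = 20 and inc_len = 8:
 *   stride_len = 20, left = 20 % 16 = 4, window_len = 16
 *   left is not below ras_stride_pages, so left = 4 + 8 = 12
 *   step = 12 / 4 = 3, left = 0
 *   window_len = 16 + 3 * 16 + 0 = 64
 * so the window grows from 20 to 64 pages (covering 16 data pages), and is
 * kept only if that page count still fits within ra_max_pages_per_file.
 */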
 957
 958static void ras_increase_window(struct inode *inode,
 959                                struct ll_readahead_state *ras,
 960                                struct ll_ra_info *ra)
 961{
 962        /* The stretch of the ra-window should be aligned with the max
 963         * rpc_size, but the current clio architecture does not support
 964         * retrieving such information from the lower layer. FIXME later
 965         */
 966        if (stride_io_mode(ras))
 967                ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
 968        else
 969                ras->ras_window_len = min(ras->ras_window_len +
 970                                          RAS_INCREASE_STEP(inode),
 971                                          ra->ra_max_pages_per_file);
 972}
 973
 974void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 975                struct ll_readahead_state *ras, unsigned long index,
 976                unsigned hit)
 977{
 978        struct ll_ra_info *ra = &sbi->ll_ra_info;
 979        int zero = 0, stride_detect = 0, ra_miss = 0;
 980
 981        spin_lock(&ras->ras_lock);
 982
 983        ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
 984
 985        /* Reset the read-ahead window in two cases.  First, when the app seeks
 986         * or reads to some other part of the file.  Second, if we get a
 987         * read-ahead miss that we think we've previously issued.  This can
 988         * be a symptom of there being so many read-ahead pages that the VM is
 989         * reclaiming them before we get to them. */
 990        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
 991                zero = 1;
 992                ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
 993        } else if (!hit && ras->ras_window_len &&
 994                   index < ras->ras_next_readahead &&
 995                   index_in_window(index, ras->ras_window_start, 0,
 996                                   ras->ras_window_len)) {
 997                ra_miss = 1;
 998                ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
 999        }
1000
1001        /* On the second access to a file smaller than the tunable
1002         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
1003         * file up to ra_max_pages_per_file.  This is simply a best effort
1004         * and only occurs once per open file.  Normal RA behavior is reverted
1005         * to for subsequent IO.  The mmap case does not increment
1006         * ras_requests and thus can never trigger this behavior. */
1007        if (ras->ras_requests == 2 && !ras->ras_request_index) {
1008                __u64 kms_pages;
1009
1010                kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1011                            PAGE_CACHE_SHIFT;
1012
1013                CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
1014                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
1015
1016                if (kms_pages &&
1017                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1018                        ras->ras_window_start = 0;
1019                        ras->ras_last_readpage = 0;
1020                        ras->ras_next_readahead = 0;
1021                        ras->ras_window_len = min(ra->ra_max_pages_per_file,
1022                                ra->ra_max_read_ahead_whole_pages);
1023                        goto out_unlock;
1024                }
1025        }
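        /*
         * Whole-file case above, for example: with 4KB pages a 2MB file
         * gives kms_pages = 512; if that is within the
         * ra_max_read_ahead_whole_pages tunable, the second read request on
         * the file sets up a window covering the entire file (capped at
         * ra_max_pages_per_file).
         */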
1026        if (zero) {
1027                /* Check whether it is in stride I/O mode */
1028                if (!index_in_stride_window(ras, index)) {
1029                        if (ras->ras_consecutive_stride_requests == 0 &&
1030                            ras->ras_request_index == 0) {
1031                                ras_update_stride_detector(ras, index);
1032                                ras->ras_consecutive_stride_requests++;
1033                        } else {
1034                                ras_stride_reset(ras);
1035                        }
1036                        ras_reset(inode, ras, index);
1037                        ras->ras_consecutive_pages++;
1038                        goto out_unlock;
1039                } else {
1040                        ras->ras_consecutive_pages = 0;
1041                        ras->ras_consecutive_requests = 0;
1042                        if (++ras->ras_consecutive_stride_requests > 1)
1043                                stride_detect = 1;
1044                        RAS_CDEBUG(ras);
1045                }
1046        } else {
1047                if (ra_miss) {
1048                        if (index_in_stride_window(ras, index) &&
1049                            stride_io_mode(ras)) {
1050                                /* If stride-RA hit a cache miss, the stride
1051                                 * detector will not be reset, to avoid the
1052                                 * overhead of redetecting the read-ahead mode */
1053                                if (index != ras->ras_last_readpage + 1)
1054                                        ras->ras_consecutive_pages = 0;
1055                                ras_reset(inode, ras, index);
1056                                RAS_CDEBUG(ras);
1057                        } else {
1058                                /* Reset both stride window and normal RA
1059                                 * window */
1060                                ras_reset(inode, ras, index);
1061                                ras->ras_consecutive_pages++;
1062                                ras_stride_reset(ras);
1063                                goto out_unlock;
1064                        }
1065                } else if (stride_io_mode(ras)) {
1066                        /* If this is a contiguous read while currently in
1067                         * stride I/O mode, check whether the stride step is
1068                         * still valid; if not, reset the stride ra window */
1069                        if (!index_in_stride_window(ras, index)) {
1070                                /* Shrink stride read-ahead window to be zero */
1071                                ras_stride_reset(ras);
1072                                ras->ras_window_len = 0;
1073                                ras->ras_next_readahead = index;
1074                        }
1075                }
1076        }
1077        ras->ras_consecutive_pages++;
1078        ras->ras_last_readpage = index;
1079        ras_set_start(inode, ras, index);
1080
1081        if (stride_io_mode(ras))
1082                /* Since stride readahead is sensitive to the read-ahead
1083                 * offset, use the original offset here instead of
1084                 * ras_window_start, which is RPC aligned */
1085                ras->ras_next_readahead = max(index, ras->ras_next_readahead);
1086        else
1087                ras->ras_next_readahead = max(ras->ras_window_start,
1088                                              ras->ras_next_readahead);
1089        RAS_CDEBUG(ras);
1090
1091        /* Trigger RA in the mmap case where ras_consecutive_requests
1092         * is not incremented and thus can't be used to trigger RA */
1093        if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
1094                ras->ras_window_len = RAS_INCREASE_STEP(inode);
1095                goto out_unlock;
1096        }
1097
1098        /* Initially reset the stride window offset to next_readahead */
1099        if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
1100                /**
1101                 * Once stride IO mode is detected, next_readahead should be
1102                 * reset to make sure next_readahead > stride offset
1103                 */
1104                ras->ras_next_readahead = max(index, ras->ras_next_readahead);
1105                ras->ras_stride_offset = index;
1106                ras->ras_window_len = RAS_INCREASE_STEP(inode);
1107        }
1108
1109        /* The initial ras_window_len is set to the request size.  To avoid
1110         * uselessly reading and discarding pages for random IO the window is
1111         * only increased once per consecutive request received. */
1112        if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
1113            !ras->ras_request_index)
1114                ras_increase_window(inode, ras, ra);
1115out_unlock:
1116        RAS_CDEBUG(ras);
1117        ras->ras_request_index++;
1118        spin_unlock(&ras->ras_lock);
1119        return;
1120}
1121
1122int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1123{
1124        struct inode           *inode = vmpage->mapping->host;
1125        struct ll_inode_info   *lli   = ll_i2info(inode);
1126        struct lu_env     *env;
1127        struct cl_io       *io;
1128        struct cl_page   *page;
1129        struct cl_object       *clob;
1130        struct cl_env_nest      nest;
1131        bool redirtied = false;
1132        bool unlocked = false;
1133        int result;
1134
1135        LASSERT(PageLocked(vmpage));
1136        LASSERT(!PageWriteback(vmpage));
1137
1138        LASSERT(ll_i2dtexp(inode) != NULL);
1139
1140        env = cl_env_nested_get(&nest);
1141        if (IS_ERR(env)) {
1142                result = PTR_ERR(env);
1143                goto out;
1144        }
1145
1146        clob  = ll_i2info(inode)->lli_clob;
1147        LASSERT(clob != NULL);
1148
1149        io = ccc_env_thread_io(env);
1150        io->ci_obj = clob;
1151        io->ci_ignore_layout = 1;
1152        result = cl_io_init(env, io, CIT_MISC, clob);
1153        if (result == 0) {
1154                page = cl_page_find(env, clob, vmpage->index,
1155                                    vmpage, CPT_CACHEABLE);
1156                if (!IS_ERR(page)) {
1157                        lu_ref_add(&page->cp_reference, "writepage",
1158                                   current);
1159                        cl_page_assume(env, io, page);
1160                        result = cl_page_flush(env, io, page);
1161                        if (result != 0) {
1162                                /*
1163                                 * Re-dirty page on error so it retries write,
1164                                 * but not in case when IO has actually
1165                                 * occurred and completed with an error.
1166                                 */
1167                                if (!PageError(vmpage)) {
1168                                        redirty_page_for_writepage(wbc, vmpage);
1169                                        result = 0;
1170                                        redirtied = true;
1171                                }
1172                        }
1173                        cl_page_disown(env, io, page);
1174                        unlocked = true;
1175                        lu_ref_del(&page->cp_reference,
1176                                   "writepage", current);
1177                        cl_page_put(env, page);
1178                } else {
1179                        result = PTR_ERR(page);
1180                }
1181        }
1182        cl_io_fini(env, io);
1183
1184        if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
1185                loff_t offset = cl_offset(clob, vmpage->index);
1186
1187                /* Flushing the page failed because the extent is being
1188                 * written out. Wait for the extent write to finish to avoid
1189                 * breaking the kernel, which assumes ->writepage should mark
1190                 * PageWriteback or clean the page. */
1191                result = cl_sync_file_range(inode, offset,
1192                                            offset + PAGE_CACHE_SIZE - 1,
1193                                            CL_FSYNC_LOCAL, 1);
1194                if (result > 0) {
1195                        /* Actually we may have written more than one page;
1196                         * subtract this page because the caller will count
1197                         * it. */
1198                        wbc->nr_to_write -= result - 1;
1199                        result = 0;
1200                }
1201        }
1202
1203        cl_env_nested_put(&nest, env);
1204        goto out;
1205
1206out:
1207        if (result < 0) {
1208                if (!lli->lli_async_rc)
1209                        lli->lli_async_rc = result;
1210                SetPageError(vmpage);
1211                if (!unlocked)
1212                        unlock_page(vmpage);
1213        }
1214        return result;
1215}
1216
1217int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1218{
1219        struct inode *inode = mapping->host;
1220        struct ll_sb_info *sbi = ll_i2sbi(inode);
1221        loff_t start;
1222        loff_t end;
1223        enum cl_fsync_mode mode;
1224        int range_whole = 0;
1225        int result;
1226        int ignore_layout = 0;
1227
1228        if (wbc->range_cyclic) {
1229                start = mapping->writeback_index << PAGE_CACHE_SHIFT;
1230                end = OBD_OBJECT_EOF;
1231        } else {
1232                start = wbc->range_start;
1233                end = wbc->range_end;
1234                if (end == LLONG_MAX) {
1235                        end = OBD_OBJECT_EOF;
1236                        range_whole = start == 0;
1237                }
1238        }
1239
1240        mode = CL_FSYNC_NONE;
1241        if (wbc->sync_mode == WB_SYNC_ALL)
1242                mode = CL_FSYNC_LOCAL;
1243
1244        if (sbi->ll_umounting)
1245                /* if the mountpoint is being umounted, all pages have to be
1246                 * evicted to avoid hitting LBUG when truncate_inode_pages()
1247                 * is called later on. */
1248                ignore_layout = 1;
1249        result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
1250        if (result > 0) {
1251                wbc->nr_to_write -= result;
1252                result = 0;
1253        }
1254
1255        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1256                if (end == OBD_OBJECT_EOF)
1257                        end = i_size_read(inode);
1258                mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
1259        }
1260        return result;
1261}
1262
1263int ll_readpage(struct file *file, struct page *vmpage)
1264{
1265        struct ll_cl_context *lcc;
1266        int result;
1267
1268        lcc = ll_cl_init(file, vmpage, 0);
1269        if (!IS_ERR(lcc)) {
1270                struct lu_env  *env  = lcc->lcc_env;
1271                struct cl_io   *io   = lcc->lcc_io;
1272                struct cl_page *page = lcc->lcc_page;
1273
1274                LASSERT(page->cp_type == CPT_CACHEABLE);
1275                if (likely(!PageUptodate(vmpage))) {
1276                        cl_page_assume(env, io, page);
1277                        result = cl_io_read_page(env, io, page);
1278                } else {
1279                        /* Page from a non-object file. */
1280                        unlock_page(vmpage);
1281                        result = 0;
1282                }
1283                ll_cl_fini(lcc);
1284        } else {
1285                unlock_page(vmpage);
1286                result = PTR_ERR(lcc);
1287        }
1288        return result;
1289}
1290