linux/drivers/staging/lustre/lustre/include/cl_object.h
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * GPL HEADER START
   4 *
   5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 only,
   9 * as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License version 2 for more details (a copy is included
  15 * in the LICENSE file that accompanied this code).
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * version 2 along with this program; If not, see
  19 * http://www.gnu.org/licenses/gpl-2.0.html
  20 *
  21 * GPL HEADER END
  22 */
  23/*
  24 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  25 * Use is subject to license terms.
  26 *
  27 * Copyright (c) 2011, 2015, Intel Corporation.
  28 */
  29/*
  30 * This file is part of Lustre, http://www.lustre.org/
  31 * Lustre is a trademark of Sun Microsystems, Inc.
  32 */
  33#ifndef _LUSTRE_CL_OBJECT_H
  34#define _LUSTRE_CL_OBJECT_H
  35
  36/** \defgroup clio clio
  37 *
  38 * Client objects implement io operations and cache pages.
  39 *
  40 * Examples: lov and osc are implementations of cl interface.
  41 *
  42 * Big Theory Statement.
  43 *
  44 * Layered objects.
  45 *
  46 * Client implementation is based on the following data-types:
  47 *
  48 *   - cl_object
  49 *
  50 *   - cl_page
  51 *
  52 *   - cl_lock     represents an extent lock on an object.
  53 *
  54 *   - cl_io       represents high-level i/o activity such as whole read/write
  55 *               system call, or write-out of pages from under the lock being
  56 *               canceled. cl_io has sub-ios that can be stopped and resumed
  57 *               independently, thus achieving high degree of transfer
   58 *               parallelism. A single cl_io can be advanced forward by
   59 *               multiple threads (although in the most usual case of a
   60 *               read/write system call it is associated with the single user
   61 *               thread that issued the system call).
  62 *
  63 * Terminology
  64 *
   65 *     - to avoid confusion, a high-level I/O operation like a read or write
   66 *     system call is referred to as "an io", whereas a low-level I/O
   67 *     operation, like an RPC, is referred to as "a transfer"
  68 *
  69 *     - "generic code" means generic (not file system specific) code in the
  70 *     hosting environment. "cl-code" means code (mostly in cl_*.c files) that
  71 *     is not layer specific.
  72 *
  73 * Locking.
  74 *
  75 *  - i_mutex
  76 *      - PG_locked
  77 *        - cl_object_header::coh_page_guard
  78 *        - lu_site::ls_guard
  79 *
  80 * See the top comment in cl_object.c for the description of overall locking and
  81 * reference-counting design.
  82 *
  83 * See comments below for the description of i/o, page, and dlm-locking
  84 * design.
  85 *
  86 * @{
  87 */
  88
  89/*
  90 * super-class definitions.
  91 */
  92#include <lu_object.h>
  93#include <lustre_compat.h>
  94#include <linux/atomic.h>
  95#include <linux/mutex.h>
  96#include <linux/radix-tree.h>
  97#include <linux/spinlock.h>
  98#include <linux/wait.h>
  99
 100struct inode;
 101
 102struct cl_device;
 103
 104struct cl_object;
 105
 106struct cl_page;
 107struct cl_page_slice;
 108struct cl_lock;
 109struct cl_lock_slice;
 110
 111struct cl_lock_operations;
 112struct cl_page_operations;
 113
 114struct cl_io;
 115struct cl_io_slice;
 116
 117struct cl_req_attr;
 118
 119/**
 120 * Device in the client stack.
 121 *
 122 * \see vvp_device, lov_device, lovsub_device, osc_device
 123 */
 124struct cl_device {
 125        /** Super-class. */
 126        struct lu_device                   cd_lu_dev;
 127};
 128
 129/** \addtogroup cl_object cl_object
 130 * @{
 131 */
 132/**
 133 * "Data attributes" of cl_object. Data attributes can be updated
 134 * independently for a sub-object, and top-object's attributes are calculated
 135 * from sub-objects' ones.
 136 */
 137struct cl_attr {
 138        /** Object size, in bytes */
 139        loff_t cat_size;
 140        /**
 141         * Known minimal size, in bytes.
 142         *
 143         * This is only valid when at least one DLM lock is held.
 144         */
 145        loff_t cat_kms;
 146        /** Modification time. Measured in seconds since epoch. */
 147        time64_t cat_mtime;
 148        /** Access time. Measured in seconds since epoch. */
 149        time64_t cat_atime;
 150        /** Change time. Measured in seconds since epoch. */
 151        time64_t cat_ctime;
 152        /**
 153         * Blocks allocated to this cl_object on the server file system.
 154         *
 155         * \todo XXX An interface for block size is needed.
 156         */
 157        __u64  cat_blocks;
 158        /**
 159         * User identifier for quota purposes.
 160         */
 161        uid_t  cat_uid;
 162        /**
 163         * Group identifier for quota purposes.
 164         */
 165        gid_t  cat_gid;
 166
 167        /* nlink of the directory */
 168        __u64  cat_nlink;
 169};
 170
 171/**
 172 * Fields in cl_attr that are being set.
 173 */
 174enum cl_attr_valid {
 175        CAT_SIZE   = 1 << 0,
 176        CAT_KMS    = 1 << 1,
 177        CAT_MTIME  = 1 << 3,
 178        CAT_ATIME  = 1 << 4,
 179        CAT_CTIME  = 1 << 5,
 180        CAT_BLOCKS = 1 << 6,
 181        CAT_UID    = 1 << 7,
 182        CAT_GID    = 1 << 8
 183};
 184
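/*
 * A minimal usage sketch (not part of the interface): a caller propagating a
 * new size and mtime to the layers could combine the bits above and feed them
 * to the attribute-update path, assuming the cl_object_attr_lock() /
 * cl_object_attr_update() helpers declared later in this header:
 *
 *      struct cl_attr attr = { 0 };
 *      int rc;
 *
 *      attr.cat_size  = new_size;
 *      attr.cat_mtime = ktime_get_real_seconds();
 *      cl_object_attr_lock(obj);
 *      rc = cl_object_attr_update(env, obj, &attr, CAT_SIZE | CAT_MTIME);
 *      cl_object_attr_unlock(obj);
 *
 * env, obj and new_size are assumed to be provided by the surrounding code.
 */
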
 185/**
 186 * Sub-class of lu_object with methods common for objects on the client
 187 * stacks.
 188 *
 189 * cl_object: represents a regular file system object, both a file and a
 190 *    stripe. cl_object is based on lu_object: it is identified by a fid,
 191 *    layered, cached, hashed, and lrued. Important distinction with the server
  192 *    layered, cached, hashed, and lrued. An important distinction from the
  193 *    server side, where md_object and dt_object are used, is that cl_object
  194 *    "fans out" at the lov/sns level: depending on the file layout, a single
  195 *    file is represented as a set of "sub-objects" (stripes). At the
  196 *    implementation level, struct lov_object contains an array of cl_objects.
  197 *    Each sub-object is a full-fledged cl_object, having its own fid and
  198 *    living in the lru and hash table.
 199 *    This leads to the next important difference with the server side: on the
  200 *    client, it's quite usual to have objects with different sequences of
  201 *    layers. For example, a typical top-object is composed of the following
 202 *    layers:
 203 *
 204 *      - vvp
 205 *      - lov
 206 *
 207 *    whereas its sub-objects are composed of
 208 *
 209 *      - lovsub
 210 *      - osc
 211 *
 212 *    layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
 213 *    track of the object-subobject relationship.
 214 *
  215 *    Sub-objects are not cached independently: when the top-object is about
  216 *    to be discarded from memory, all its sub-objects are torn down and
  217 *    destroyed too.
 218 *
 219 * \see vvp_object, lov_object, lovsub_object, osc_object
 220 */
 221struct cl_object {
 222        /** super class */
 223        struct lu_object                   co_lu;
 224        /** per-object-layer operations */
 225        const struct cl_object_operations *co_ops;
 226        /** offset of page slice in cl_page buffer */
 227        int                                co_slice_off;
 228};
 229
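/*
 * Illustrative sketch only: co_slice_off records where this layer's
 * cl_page_slice lives inside the cl_page buffer (whose total size is
 * cl_object_header::coh_page_bufsize), so a layer can locate its slice by
 * plain pointer arithmetic instead of walking cl_page::cp_layers:
 *
 *      static inline struct cl_page_slice *
 *      example_page_slice(const struct cl_object *obj, struct cl_page *page)
 *      {
 *              return (void *)((char *)page + obj->co_slice_off);
 *      }
 *
 * example_page_slice() is a hypothetical name used only for illustration; the
 * real lookup helper lives in the cl_page implementation.
 */
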
 230/**
  231 * Description of the client object configuration. This is used for the
  232 * creation of a new client object that is identified by more state than
  233 * just a fid.
 234 */
 235struct cl_object_conf {
 236        /** Super-class. */
 237        struct lu_object_conf     coc_lu;
 238        union {
 239                /**
 240                 * Object layout. This is consumed by lov.
 241                 */
 242                struct lu_buf     coc_layout;
 243                /**
 244                 * Description of particular stripe location in the
 245                 * cluster. This is consumed by osc.
 246                 */
 247                struct lov_oinfo *coc_oinfo;
 248        } u;
 249        /**
 250         * VFS inode. This is consumed by vvp.
 251         */
 252        struct inode         *coc_inode;
 253        /**
 254         * Layout lock handle.
 255         */
 256        struct ldlm_lock         *coc_lock;
 257        /**
 258         * Operation to handle layout, OBJECT_CONF_XYZ.
 259         */
 260        int                       coc_opc;
 261};
 262
 263enum {
 264        /** configure layout, set up a new stripe, must be called while
 265         * holding layout lock.
 266         */
 267        OBJECT_CONF_SET = 0,
 268        /** invalidate the current stripe configuration due to losing
 269         * layout lock.
 270         */
 271        OBJECT_CONF_INVALIDATE = 1,
 272        /** wait for old layout to go away so that new layout can be set up. */
 273        OBJECT_CONF_WAIT = 2
 274};
 275
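/*
 * A hedged usage sketch, assuming the cl_conf_set() helper declared later in
 * this header: when the layout lock is lost, the cached stripe configuration
 * would be invalidated roughly as follows:
 *
 *      struct cl_object_conf conf = {
 *              .coc_inode = inode,
 *              .coc_opc   = OBJECT_CONF_INVALIDATE,
 *      };
 *
 *      rc = cl_conf_set(env, obj, &conf);
 *
 * Installing a new layout would instead use OBJECT_CONF_SET and fill
 * conf.u.coc_layout with the lov_mds_md buffer, while the layout lock is held.
 */
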
 276enum {
 277        CL_LAYOUT_GEN_NONE      = (u32)-2,      /* layout lock was cancelled */
 278        CL_LAYOUT_GEN_EMPTY     = (u32)-1,      /* for empty layout */
 279};
 280
 281struct cl_layout {
 282        /** the buffer to return the layout in lov_mds_md format. */
 283        struct lu_buf   cl_buf;
 284        /** size of layout in lov_mds_md format. */
 285        size_t          cl_size;
 286        /** Layout generation. */
 287        u32             cl_layout_gen;
 288};
 289
 290/**
 291 * Operations implemented for each cl object layer.
 292 *
 293 * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
 294 */
 295struct cl_object_operations {
 296        /**
 297         * Initialize page slice for this layer. Called top-to-bottom through
  298         * every object layer when a new cl_page is instantiated. A layer
  299         * keeping private per-page data, or requiring its own page operations
  300         * vector, should allocate this data here and attach it to the page
  301         * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
 302         * sense). Optional.
 303         *
 304         * \retval NULL success.
 305         *
 306         * \retval ERR_PTR(errno) failure code.
 307         *
 308         * \retval valid-pointer pointer to already existing referenced page
 309         *       to be used instead of newly created.
 310         */
 311        int  (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
 312                              struct cl_page *page, pgoff_t index);
 313        /**
 314         * Initialize lock slice for this layer. Called top-to-bottom through
  315         * every object layer when a new cl_lock is instantiated. A layer
  316         * keeping private per-lock data, or requiring its own lock operations
  317         * vector, should allocate this data here and attach it to the lock
 318         * by calling cl_lock_slice_add(). Mandatory.
 319         */
 320        int  (*coo_lock_init)(const struct lu_env *env,
 321                              struct cl_object *obj, struct cl_lock *lock,
 322                              const struct cl_io *io);
 323        /**
 324         * Initialize io state for a given layer.
 325         *
  326         * Called top-to-bottom once over the lifetime of an io to initialize
  327         * its state. If a layer wants to keep some state for this type of io,
  328         * it has to embed a struct cl_io_slice in lu_env::le_ses and register
  329         * the slice with cl_io_slice_add(). It is guaranteed that all threads
 330         * participating in this io share the same session.
 331         */
 332        int  (*coo_io_init)(const struct lu_env *env,
 333                            struct cl_object *obj, struct cl_io *io);
 334        /**
 335         * Fill portion of \a attr that this layer controls. This method is
 336         * called top-to-bottom through all object layers.
 337         *
 338         * \pre cl_object_header::coh_attr_guard of the top-object is locked.
 339         *
 340         * \return   0: to continue
 341         * \return +ve: to stop iterating through layers (but 0 is returned
 342         * from enclosing cl_object_attr_get())
 343         * \return -ve: to signal error
 344         */
 345        int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
 346                            struct cl_attr *attr);
 347        /**
 348         * Update attributes.
 349         *
  350         * \a valid is a bitmask composed of enum #cl_attr_valid flags,
 351         * indicating what attributes are to be set.
 352         *
 353         * \pre cl_object_header::coh_attr_guard of the top-object is locked.
 354         *
 355         * \return the same convention as for
 356         * cl_object_operations::coo_attr_get() is used.
 357         */
 358        int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
 359                               const struct cl_attr *attr, unsigned int valid);
 360        /**
 361         * Update object configuration. Called top-to-bottom to modify object
 362         * configuration.
 363         *
 364         * XXX error conditions and handling.
 365         */
 366        int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
 367                            const struct cl_object_conf *conf);
 368        /**
 369         * Glimpse ast. Executed when glimpse ast arrives for a lock on this
 370         * object. Layers are supposed to fill parts of \a lvb that will be
 371         * shipped to the glimpse originator as a glimpse result.
 372         *
 373         * \see vvp_object_glimpse(), lovsub_object_glimpse(),
 374         * \see osc_object_glimpse()
 375         */
 376        int (*coo_glimpse)(const struct lu_env *env,
 377                           const struct cl_object *obj, struct ost_lvb *lvb);
 378        /**
 379         * Object prune method. Called when the layout is going to change on
  380         * this object, therefore each layer has to clean up its cache,
 381         * mainly pages and locks.
 382         */
 383        int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
 384        /**
 385         * Object getstripe method.
 386         */
 387        int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
 388                             struct lov_user_md __user *lum);
 389        /**
 390         * Get FIEMAP mapping from the object.
 391         */
 392        int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
 393                          struct ll_fiemap_info_key *fmkey,
 394                          struct fiemap *fiemap, size_t *buflen);
 395        /**
 396         * Get layout and generation of the object.
 397         */
 398        int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
 399                              struct cl_layout *layout);
 400        /**
 401         * Get maximum size of the object.
 402         */
 403        loff_t (*coo_maxbytes)(struct cl_object *obj);
 404        /**
 405         * Set request attributes.
 406         */
 407        void (*coo_req_attr_set)(const struct lu_env *env,
 408                                 struct cl_object *obj,
 409                                 struct cl_req_attr *attr);
 410};
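
/*
 * Illustrative sketch only: a layer typically provides a statically allocated
 * operation vector and installs it from its object-initialization code. The
 * "foo" layer and its methods below are hypothetical:
 *
 *      static const struct cl_object_operations foo_ops = {
 *              .coo_page_init   = foo_page_init,
 *              .coo_lock_init   = foo_lock_init,
 *              .coo_io_init     = foo_io_init,
 *              .coo_attr_get    = foo_attr_get,
 *              .coo_attr_update = foo_attr_update,
 *      };
 *
 * See vvp_ops, lov_ops, lovsub_ops and osc_ops for real instances.
 */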
 411
 412/**
 413 * Extended header for client object.
 414 */
 415struct cl_object_header {
 416        /** Standard lu_object_header. cl_object::co_lu::lo_header points
 417         * here.
 418         */
 419        struct lu_object_header  coh_lu;
 420
 421        /**
 422         * Parent object. It is assumed that an object has a well-defined
 423         * parent, but not a well-defined child (there may be multiple
 424         * sub-objects, for the same top-object). cl_object_header::coh_parent
 425         * field allows certain code to be written generically, without
 426         * limiting possible cl_object layouts unduly.
 427         */
 428        struct cl_object_header *coh_parent;
 429        /**
 430         * Protects consistency between cl_attr of parent object and
  431         * attributes of sub-objects, from which the former is calculated
  432         * ("merged").
 433         *
 434         * \todo XXX this can be read/write lock if needed.
 435         */
 436        spinlock_t               coh_attr_guard;
 437        /**
 438         * Size of cl_page + page slices
 439         */
 440        unsigned short           coh_page_bufsize;
 441        /**
 442         * Number of objects above this one: 0 for a top-object, 1 for its
 443         * sub-object, etc.
 444         */
 445        unsigned char            coh_nesting;
 446};
 447
 448/**
 449 * Helper macro: iterate over all layers of the object \a obj, assigning every
 450 * layer top-to-bottom to \a slice.
 451 */
 452#define cl_object_for_each(slice, obj)                                \
 453        list_for_each_entry((slice),                                \
 454                                &(obj)->co_lu.lo_header->loh_layers,    \
 455                                co_lu.lo_linkage)
 456/**
 457 * Helper macro: iterate over all layers of the object \a obj, assigning every
 458 * layer bottom-to-top to \a slice.
 459 */
 460#define cl_object_for_each_reverse(slice, obj)                         \
 461        list_for_each_entry_reverse((slice),                         \
 462                                        &(obj)->co_lu.lo_header->loh_layers, \
 463                                        co_lu.lo_linkage)
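/*
 * Illustrative sketch only: a top-to-bottom method dispatch over the layers
 * typically looks as below; the return-value convention matches the one
 * documented for cl_object_operations::coo_attr_get(). example_attr_get() is
 * a hypothetical name, the real dispatch is done by cl_object_attr_get():
 *
 *      static int example_attr_get(const struct lu_env *env,
 *                                  struct cl_object *top, struct cl_attr *attr)
 *      {
 *              struct cl_object *obj;
 *              int rc = 0;
 *
 *              cl_object_for_each(obj, top) {
 *                      if (!obj->co_ops->coo_attr_get)
 *                              continue;
 *                      rc = obj->co_ops->coo_attr_get(env, obj, attr);
 *                      if (rc != 0) {
 *                              if (rc > 0)
 *                                      rc = 0;
 *                              break;
 *                      }
 *              }
 *              return rc;
 *      }
 */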
 464/** @} cl_object */
 465
 466#define CL_PAGE_EOF ((pgoff_t)~0ull)
 467
 468/** \addtogroup cl_page cl_page
 469 * @{
 470 */
 471
 472/** \struct cl_page
 473 * Layered client page.
 474 *
 475 * cl_page: represents a portion of a file, cached in the memory. All pages
 476 *    of the given file are of the same size, and are kept in the radix tree
 477 *    hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
 478 *    of the top-level file object are first class cl_objects, they have their
  479 *    own radix trees of pages and hence a page is implemented as a sequence
  480 *    of struct cl_page's, linked into a doubly-linked list through
 481 *    cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
 482 *    corresponding radix tree at the corresponding logical offset.
 483 *
  484 * cl_page is associated with a VM page of the hosting environment (struct
  485 *    page in the Linux kernel, for example). It is assumed that this
 486 *    association is implemented by one of cl_page layers (top layer in the
 487 *    current design) that
 488 *
 489 *      - intercepts per-VM-page call-backs made by the environment (e.g.,
 490 *        memory pressure),
 491 *
 492 *      - translates state (page flag bits) and locking between lustre and
 493 *        environment.
 494 *
 495 *    The association between cl_page and struct page is immutable and
 496 *    established when cl_page is created.
 497 *
 498 * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
  499 *    this io exclusive access to this page w.r.t. other io attempts and
  500 *    various events changing page state (such as transfer completion, or
  501 *    eviction of the page from memory). Note that, in general, a cl_io
  502 *    cannot be identified with a particular thread, and page ownership is not
  503 *    exactly equal to the current thread holding a lock on the page. The
  504 *    layer implementing the association between cl_page and struct page has
  505 *    to implement ownership on top of available synchronization mechanisms.
 506 *
  507 *    While the lustre client maintains the notion of page ownership by io,
 508 *    hosting MM/VM usually has its own page concurrency control
 509 *    mechanisms. For example, in Linux, page access is synchronized by the
 510 *    per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
 511 *    takes care to acquire and release such locks as necessary around the
 512 *    calls to the file system methods (->readpage(), ->prepare_write(),
 513 *    ->commit_write(), etc.). This leads to the situation when there are two
 514 *    different ways to own a page in the client:
 515 *
  516 *      - client code explicitly and voluntarily owns the page (cl_page_own());
 517 *
  518 *      - the VM locks a page and then calls the client, which has to "assume"
  519 *        the ownership from the VM (cl_page_assume()).
 520 *
 521 *    Dual methods to release ownership are cl_page_disown() and
 522 *    cl_page_unassume().
 523 *
 524 * cl_page is reference counted (cl_page::cp_ref). When reference counter
 525 *    drops to 0, the page is returned to the cache, unless it is in
 526 *    cl_page_state::CPS_FREEING state, in which case it is immediately
 527 *    destroyed.
 528 *
 529 *    The general logic guaranteeing the absence of "existential races" for
 530 *    pages is the following:
 531 *
 532 *      - there are fixed known ways for a thread to obtain a new reference
 533 *        to a page:
 534 *
 535 *          - by doing a lookup in the cl_object radix tree, protected by the
 536 *            spin-lock;
 537 *
 538 *          - by starting from VM-locked struct page and following some
 539 *            hosting environment method (e.g., following ->private pointer in
 540 *            the case of Linux kernel), see cl_vmpage_page();
 541 *
 542 *      - when the page enters cl_page_state::CPS_FREEING state, all these
 543 *        ways are severed with the proper synchronization
 544 *        (cl_page_delete());
 545 *
 546 *      - entry into cl_page_state::CPS_FREEING is serialized by the VM page
 547 *        lock;
 548 *
 549 *      - no new references to the page in cl_page_state::CPS_FREEING state
 550 *        are allowed (checked in cl_page_get()).
 551 *
  552 *    Together this guarantees that when the last reference to a
  553 *    cl_page_state::CPS_FREEING page is released, it is safe to destroy the
  554 *    page, as no new references to it can be acquired at that point and no
  555 *    existing ones remain.
 556 *
 557 * cl_page is a state machine. States are enumerated in enum
 558 *    cl_page_state. Possible state transitions are enumerated in
 559 *    cl_page_state_set(). State transition process (i.e., actual changing of
 560 *    cl_page::cp_state field) is protected by the lock on the underlying VM
 561 *    page.
 562 *
 563 * Linux Kernel implementation.
 564 *
  565 *    The binding between cl_page and struct page is implemented in the vvp
  566 *    layer. cl_page is attached to the
 567 *    ->private pointer of the struct page, together with the setting of
 568 *    PG_private bit in page->flags, and acquiring additional reference on the
 569 *    struct page (much like struct buffer_head, or any similar file system
 570 *    private data structures).
 571 *
 572 *    PG_locked lock is used to implement both ownership and transfer
 573 *    synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
 574 *    states. No additional references are acquired for the duration of the
 575 *    transfer.
 576 *
 577 * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
 578 *        write-out is "protected" by the special PG_writeback bit.
 579 */
 580
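/*
 * A minimal sketch of the two ownership paths described above, assuming the
 * cl_page_own()/cl_page_disown() and cl_page_assume()/cl_page_unassume()
 * entry points declared later in this header:
 *
 *      voluntary ownership, e.g. from the read-ahead path:
 *
 *              if (cl_page_own(env, io, page) == 0) {
 *                      ... use the page exclusively ...
 *                      cl_page_disown(env, io, page);
 *              }
 *
 *      ownership inherited from a VM-locked vmpage, e.g. from ->readpage():
 *
 *              cl_page_assume(env, io, page);
 *              ... use the page ...
 *              cl_page_unassume(env, io, page);
 */
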
 581/**
 582 * States of cl_page. cl_page.c assumes particular order here.
 583 *
 584 * The page state machine is rather crude, as it doesn't recognize finer page
 585 * states like "dirty" or "up to date". This is because such states are not
 586 * always well defined for the whole stack (see, for example, the
  587 * implementation of read-ahead, which hides page up-to-dateness to track
 588 * cache hits accurately). Such sub-states are maintained by the layers that
 589 * are interested in them.
 590 */
 591enum cl_page_state {
 592        /**
 593         * Page is in the cache, un-owned. Page leaves cached state in the
 594         * following cases:
 595         *
 596         *     - [cl_page_state::CPS_OWNED] io comes across the page and
 597         *     owns it;
 598         *
 599         *     - [cl_page_state::CPS_PAGEOUT] page is dirty, the
 600         *     req-formation engine decides that it wants to include this page
 601         *     into an RPC being constructed, and yanks it from the cache;
 602         *
 603         *     - [cl_page_state::CPS_FREEING] VM callback is executed to
  604         *     evict the page from memory;
 605         *
 606         * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
 607         */
 608        CPS_CACHED,
 609        /**
 610         * Page is exclusively owned by some cl_io. Page may end up in this
 611         * state as a result of
 612         *
 613         *     - io creating new page and immediately owning it;
 614         *
 615         *     - [cl_page_state::CPS_CACHED] io finding existing cached page
 616         *     and owning it;
 617         *
 618         *     - [cl_page_state::CPS_OWNED] io finding existing owned page
 619         *     and waiting for owner to release the page;
 620         *
 621         * Page leaves owned state in the following cases:
 622         *
 623         *     - [cl_page_state::CPS_CACHED] io decides to leave the page in
 624         *     the cache, doing nothing;
 625         *
 626         *     - [cl_page_state::CPS_PAGEIN] io starts read transfer for
 627         *     this page;
 628         *
 629         *     - [cl_page_state::CPS_PAGEOUT] io starts immediate write
 630         *     transfer for this page;
 631         *
 632         *     - [cl_page_state::CPS_FREEING] io decides to destroy this
 633         *     page (e.g., as part of truncate or extent lock cancellation).
 634         *
 635         * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
 636         */
 637        CPS_OWNED,
 638        /**
 639         * Page is being written out, as a part of a transfer. This state is
 640         * entered when req-formation logic decided that it wants this page to
 641         * be sent through the wire _now_. Specifically, it means that once
 642         * this state is achieved, transfer completion handler (with either
 643         * success or failure indication) is guaranteed to be executed against
 644         * this page independently of any locks and any scheduling decisions
 645         * made by the hosting environment (that effectively means that the
 646         * page is never put into cl_page_state::CPS_PAGEOUT state "in
  647         * advance". This property is mentioned because it is important when
 648         * reasoning about possible dead-locks in the system). The page can
 649         * enter this state as a result of
 650         *
 651         *     - [cl_page_state::CPS_OWNED] an io requesting an immediate
 652         *     write-out of this page, or
 653         *
 654         *     - [cl_page_state::CPS_CACHED] req-forming engine deciding
 655         *     that it has enough dirty pages cached to issue a "good"
 656         *     transfer.
 657         *
 658         * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
 659         * is completed---it is moved into cl_page_state::CPS_CACHED state.
 660         *
 661         * Underlying VM page is locked for the duration of transfer.
 662         *
 663         * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
 664         */
 665        CPS_PAGEOUT,
 666        /**
 667         * Page is being read in, as a part of a transfer. This is quite
 668         * similar to the cl_page_state::CPS_PAGEOUT state, except that
  669         * read-in is always "immediate"---there is no such thing as a sudden
  670         * construction of a read request from cached, presumably not up-to-date,
 671         * pages.
 672         *
 673         * Underlying VM page is locked for the duration of transfer.
 674         *
 675         * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
 676         */
 677        CPS_PAGEIN,
 678        /**
 679         * Page is being destroyed. This state is entered when client decides
 680         * that page has to be deleted from its host object, as, e.g., a part
 681         * of truncate.
 682         *
 683         * Once this state is reached, there is no way to escape it.
 684         *
 685         * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
 686         */
 687        CPS_FREEING,
 688        CPS_NR
 689};
 690
 691enum cl_page_type {
 692        /** Host page, the page is from the host inode which the cl_page
 693         * belongs to.
 694         */
 695        CPT_CACHEABLE = 1,
 696
  697        /** Transient page, the transient cl_page is used to bind a cl_page
  698         *  to a vmpage which does not belong to the same object as the cl_page.
  699         *  It is used in DirectIO and lockless IO.
 700         */
 701        CPT_TRANSIENT,
 702};
 703
 704/**
 705 * Fields are protected by the lock on struct page, except for atomics and
 706 * immutables.
 707 *
 708 * \invariant Data type invariants are in cl_page_invariant(). Basically:
 709 * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
 710 * list, consistent with the parent/child pointers in the cl_page::cp_obj and
 711 * cl_page::cp_owner (when set).
 712 */
 713struct cl_page {
 714        /** Reference counter. */
 715        atomic_t             cp_ref;
 716        /** An object this page is a part of. Immutable after creation. */
 717        struct cl_object        *cp_obj;
 718        /** vmpage */
 719        struct page             *cp_vmpage;
 720        /** Linkage of pages within group. Pages must be owned */
 721        struct list_head         cp_batch;
 722        /** List of slices. Immutable after creation. */
 723        struct list_head         cp_layers;
 724        /**
 725         * Page state. This field is const to avoid accidental update, it is
 726         * modified only internally within cl_page.c. Protected by a VM lock.
 727         */
 728        const enum cl_page_state cp_state;
 729        /**
 730         * Page type. Only CPT_TRANSIENT is used so far. Immutable after
 731         * creation.
 732         */
 733        enum cl_page_type       cp_type;
 734
 735        /**
 736         * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
 737         * by sub-io. Protected by a VM lock.
 738         */
 739        struct cl_io        *cp_owner;
 740        /** List of references to this page, for debugging. */
 741        struct lu_ref       cp_reference;
 742        /** Link to an object, for debugging. */
 743        struct lu_ref_link       cp_obj_ref;
 744        /** Link to a queue, for debugging. */
 745        struct lu_ref_link       cp_queue_ref;
 746        /** Assigned if doing a sync_io */
 747        struct cl_sync_io       *cp_sync_io;
 748};
 749
 750/**
 751 * Per-layer part of cl_page.
 752 *
 753 * \see vvp_page, lov_page, osc_page
 754 */
 755struct cl_page_slice {
 756        struct cl_page            *cpl_page;
 757        pgoff_t                          cpl_index;
 758        /**
 759         * Object slice corresponding to this page slice. Immutable after
 760         * creation.
 761         */
 762        struct cl_object                *cpl_obj;
 763        const struct cl_page_operations *cpl_ops;
 764        /** Linkage into cl_page::cp_layers. Immutable after creation. */
 765        struct list_head                       cpl_linkage;
 766};
 767
 768/**
 769 * Lock mode. For the client extent locks.
 770 *
 771 * \ingroup cl_lock
 772 */
 773enum cl_lock_mode {
 774        CLM_READ,
 775        CLM_WRITE,
 776        CLM_GROUP
 777};
 778
 779/**
 780 * Requested transfer type.
 781 */
 782enum cl_req_type {
 783        CRT_READ,
 784        CRT_WRITE,
 785        CRT_NR
 786};
 787
 788/**
 789 * Per-layer page operations.
 790 *
 791 * Methods taking an \a io argument are for the activity happening in the
 792 * context of given \a io. Page is assumed to be owned by that io, except for
 793 * the obvious cases (like cl_page_operations::cpo_own()).
 794 *
 795 * \see vvp_page_ops, lov_page_ops, osc_page_ops
 796 */
 797struct cl_page_operations {
 798        /**
 799         * cl_page<->struct page methods. Only one layer in the stack has to
 800         * implement these. Current code assumes that this functionality is
 801         * provided by the topmost layer, see cl_page_disown0() as an example.
 802         */
 803
 804        /**
 805         * Called when \a io acquires this page into the exclusive
  806         * ownership. When this method returns, it is guaranteed that the page
  807         * is not owned by another io, and no transfer is going on against
 808         * it. Optional.
 809         *
 810         * \see cl_page_own()
 811         * \see vvp_page_own(), lov_page_own()
 812         */
 813        int  (*cpo_own)(const struct lu_env *env,
 814                        const struct cl_page_slice *slice,
 815                        struct cl_io *io, int nonblock);
  816        /** Called when ownership is yielded. Optional.
 817         *
 818         * \see cl_page_disown()
 819         * \see vvp_page_disown()
 820         */
 821        void (*cpo_disown)(const struct lu_env *env,
 822                           const struct cl_page_slice *slice, struct cl_io *io);
 823        /**
 824         * Called for a page that is already "owned" by \a io from VM point of
 825         * view. Optional.
 826         *
 827         * \see cl_page_assume()
 828         * \see vvp_page_assume(), lov_page_assume()
 829         */
 830        void (*cpo_assume)(const struct lu_env *env,
 831                           const struct cl_page_slice *slice, struct cl_io *io);
 832        /** Dual to cl_page_operations::cpo_assume(). Optional. Called
 833         * bottom-to-top when IO releases a page without actually unlocking
 834         * it.
 835         *
 836         * \see cl_page_unassume()
 837         * \see vvp_page_unassume()
 838         */
 839        void (*cpo_unassume)(const struct lu_env *env,
 840                             const struct cl_page_slice *slice,
 841                             struct cl_io *io);
 842        /**
  843         * Announces, via \a uptodate, whether the page contains valid data.
 844         *
 845         * \see cl_page_export()
 846         * \see vvp_page_export()
 847         */
 848        void  (*cpo_export)(const struct lu_env *env,
 849                            const struct cl_page_slice *slice, int uptodate);
 850        /**
 851         * Checks whether underlying VM page is locked (in the suitable
 852         * sense). Used for assertions.
 853         *
 854         * \retval    -EBUSY: page is protected by a lock of a given mode;
 855         * \retval  -ENODATA: page is not protected by a lock;
 856         * \retval       0: this layer cannot decide. (Should never happen.)
 857         */
 858        int (*cpo_is_vmlocked)(const struct lu_env *env,
 859                               const struct cl_page_slice *slice);
 860        /**
 861         * Page destruction.
 862         */
 863
 864        /**
 865         * Called when page is truncated from the object. Optional.
 866         *
 867         * \see cl_page_discard()
 868         * \see vvp_page_discard(), osc_page_discard()
 869         */
 870        void (*cpo_discard)(const struct lu_env *env,
 871                            const struct cl_page_slice *slice,
 872                            struct cl_io *io);
 873        /**
  874         * Called when page is removed from the cache, and is about to be
 875         * destroyed. Optional.
 876         *
 877         * \see cl_page_delete()
 878         * \see vvp_page_delete(), osc_page_delete()
 879         */
 880        void (*cpo_delete)(const struct lu_env *env,
 881                           const struct cl_page_slice *slice);
 882        /** Destructor. Frees resources and slice itself. */
 883        void (*cpo_fini)(const struct lu_env *env,
 884                         struct cl_page_slice *slice);
 885        /**
 886         * Optional debugging helper. Prints given page slice.
 887         *
 888         * \see cl_page_print()
 889         */
 890        int (*cpo_print)(const struct lu_env *env,
 891                         const struct cl_page_slice *slice,
 892                         void *cookie, lu_printer_t p);
 893        /**
 894         * \name transfer
 895         *
 896         * Transfer methods.
 897         *
 898         * @{
 899         */
 900        /**
 901         * Request type dependent vector of operations.
 902         *
 903         * Transfer operations depend on transfer mode (cl_req_type). To avoid
  904         * passing the transfer mode to each and every one of these methods,
  905         * and to
 905         * avoid branching on request type inside of the methods, separate
 906         * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
 907         * provided. That is, method invocation usually looks like
 908         *
 909         *       slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
 910         */
 911        struct {
 912                /**
 913                 * Called when a page is submitted for a transfer as a part of
 914                 * cl_page_list.
 915                 *
 916                 * \return    0  : page is eligible for submission;
 917                 * \return    -EALREADY : skip this page;
 918                 * \return    -ve       : error.
 919                 *
 920                 * \see cl_page_prep()
 921                 */
 922                int  (*cpo_prep)(const struct lu_env *env,
 923                                 const struct cl_page_slice *slice,
 924                                 struct cl_io *io);
 925                /**
 926                 * Completion handler. This is guaranteed to be eventually
 927                 * fired after cl_page_operations::cpo_prep() or
 928                 * cl_page_operations::cpo_make_ready() call.
 929                 *
 930                 * This method can be called in a non-blocking context. It is
 931                 * guaranteed however, that the page involved and its object
 932                 * are pinned in memory (and, hence, calling cl_page_put() is
 933                 * safe).
 934                 *
 935                 * \see cl_page_completion()
 936                 */
 937                void (*cpo_completion)(const struct lu_env *env,
 938                                       const struct cl_page_slice *slice,
 939                                       int ioret);
 940                /**
 941                 * Called when cached page is about to be added to the
 942                 * ptlrpc request as a part of req formation.
 943                 *
 944                 * \return    0       : proceed with this page;
 945                 * \return    -EAGAIN : skip this page;
 946                 * \return    -ve     : error.
 947                 *
 948                 * \see cl_page_make_ready()
 949                 */
 950                int  (*cpo_make_ready)(const struct lu_env *env,
 951                                       const struct cl_page_slice *slice);
 952        } io[CRT_NR];
 953        /**
  954         * Tell transfer engine that only the [from, to] part of a page should be
 955         * transmitted.
 956         *
 957         * This is used for immediate transfers.
 958         *
 959         * \todo XXX this is not very good interface. It would be much better
 960         * if all transfer parameters were supplied as arguments to
 961         * cl_io_operations::cio_submit() call, but it is not clear how to do
 962         * this for page queues.
 963         *
 964         * \see cl_page_clip()
 965         */
 966        void (*cpo_clip)(const struct lu_env *env,
 967                         const struct cl_page_slice *slice,
 968                         int from, int to);
 969        /**
 970         * \pre  the page was queued for transferring.
 971         * \post page is removed from client's pending list, or -EBUSY
  972         *       is returned if it is already being transferred.
  973         *
  974         * This is one of the few page operations which is:
  975         * 0. called from the top level;
  976         * 1. called without the vmpage locked;
  977         * 2. required to synchronize, in every layer, its ->cpo_cancel() with
  978         *    the completion handlers. Osc uses the client obd lock for this
  979         *    purpose. Given that there is no vvp_page_cancel() and no
  980         *    lov_page_cancel(), cpo_cancel() is de facto protected by the client lock.
 981         *
 982         * \see osc_page_cancel().
 983         */
 984        int (*cpo_cancel)(const struct lu_env *env,
 985                          const struct cl_page_slice *slice);
 986        /**
  987         * Write out a page on behalf of the kernel. This is only called by
  988         * ll_writepage right now.
 989         *
 990         * \see cl_page_flush()
 991         */
 992        int (*cpo_flush)(const struct lu_env *env,
 993                         const struct cl_page_slice *slice,
 994                         struct cl_io *io);
 995        /** @} transfer */
 996};
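
/*
 * Illustrative sketch only: a layer's page operation vector, including the
 * per-transfer-type io[] sub-vectors, is normally a static designated
 * initializer. The "foo" layer and its methods are hypothetical:
 *
 *      static const struct cl_page_operations foo_page_ops = {
 *              .cpo_own    = foo_page_own,
 *              .cpo_disown = foo_page_disown,
 *              .cpo_fini   = foo_page_fini,
 *              .io = {
 *                      [CRT_READ] = {
 *                              .cpo_completion = foo_read_completion,
 *                      },
 *                      [CRT_WRITE] = {
 *                              .cpo_prep       = foo_write_prep,
 *                              .cpo_completion = foo_write_completion,
 *                      },
 *              },
 *      };
 *
 * See vvp_page_ops, lov_page_ops and osc_page_ops for real instances.
 */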
 997
 998/**
 999 * Helper macro, dumping detailed information about \a page into a log.
1000 */
1001#define CL_PAGE_DEBUG(mask, env, page, format, ...)                  \
1002do {                                                                \
1003        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {              \
1004                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);        \
1005                cl_page_print(env, &msgdata, lu_cdebug_printer, page);  \
1006                CDEBUG(mask, format, ## __VA_ARGS__);             \
1007        }                                                              \
1008} while (0)
1009
1010/**
1011 * Helper macro, dumping shorter information about \a page into a log.
1012 */
1013#define CL_PAGE_HEADER(mask, env, page, format, ...)                      \
1014do {                                                                      \
1015        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                    \
1016                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                \
1017                cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
1018                CDEBUG(mask, format, ## __VA_ARGS__);                   \
1019        }                                                                    \
1020} while (0)
1021
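/*
 * A small usage sketch: in a context where env, a cl_page and a transfer type
 * crt are available, the helpers above are used like
 *
 *      CL_PAGE_DEBUG(D_PAGE, env, page, "unexpected state %d\n",
 *                    page->cp_state);
 *      CL_PAGE_HEADER(D_TRACE, env, page, "completing %s transfer\n",
 *                     crt == CRT_READ ? "read" : "write");
 *
 * D_PAGE and D_TRACE are the usual libcfs debug masks.
 */
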
1022static inline struct page *cl_page_vmpage(struct cl_page *page)
1023{
1024        LASSERT(page->cp_vmpage);
1025        return page->cp_vmpage;
1026}
1027
1028/**
1029 * Check if a cl_page is in use.
1030 *
 1031 * The client cache holds a refcount; this refcount is dropped when
 1032 * the page is taken out of the cache, see vvp_page_delete().
1033 */
1034static inline bool __page_in_use(const struct cl_page *page, int refc)
1035{
1036        return (atomic_read(&page->cp_ref) > refc + 1);
1037}
1038
1039/**
1040 * Caller itself holds a refcount of cl_page.
1041 */
1042#define cl_page_in_use(pg)       __page_in_use(pg, 1)
1043/**
1044 * Caller doesn't hold a refcount.
1045 */
1046#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
1047
1048/** @} cl_page */
1049
1050/** \addtogroup cl_lock cl_lock
1051 * @{
1052 */
1053/** \struct cl_lock
1054 *
1055 * Extent locking on the client.
1056 *
1057 * LAYERING
1058 *
1059 * The locking model of the new client code is built around
1060 *
1061 *      struct cl_lock
1062 *
1063 * data-type representing an extent lock on a regular file. cl_lock is a
1064 * layered object (much like cl_object and cl_page), it consists of a header
1065 * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
1066 * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
1067 *
1068 * Typical cl_lock consists of the two layers:
1069 *
1070 *     - vvp_lock (vvp specific data), and
1071 *     - lov_lock (lov specific data).
1072 *
1073 * lov_lock contains an array of sub-locks. Each of these sub-locks is a
1074 * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
1075 *
1076 *     - lovsub_lock, and
1077 *     - osc_lock
1078 *
 1079 * Each sub-lock is associated with a cl_object (representing a stripe
 1080 * sub-object or the file to which the top-level cl_lock is associated), and is
1081 * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
1082 * cl_object (that at lov layer also fans out into multiple sub-objects), and
1083 * is different from cl_page, that doesn't fan out (there is usually exactly
1084 * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
1085 * a "top-lock" and its lovsub-osc portion a "sub-lock".
1086 *
1087 * LIFE CYCLE
1088 *
 1089 * cl_lock is a cacheless data container holding the lock requirements needed to
1090 * complete the IO. cl_lock is created before I/O starts and destroyed when the
1091 * I/O is complete.
1092 *
1093 * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached
1094 * to cl_lock at OSC layer. LDLM lock is still cacheable.
1095 *
1096 * INTERFACE AND USAGE
1097 *
1098 * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel.  A
1099 * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
1100 * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
 1101 * consists of multiple sub cl_locks, each sub-lock will be enqueued
 1102 * correspondingly. At the OSC layer, the lock enqueue request will tend to
 1103 * reuse a cached LDLM lock; otherwise a new LDLM lock will have to be
 1104 * requested from the OST side.
1105 *
1106 * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel()
1107 * method will be called for each layer to release the resource held by this
1108 * lock. At OSC layer, the reference count of LDLM lock, which is held at
1109 * clo_enqueue time, is released.
1110 *
1111 * LDLM lock can only be canceled if there is no cl_lock using it.
1112 *
1113 * Overall process of the locking during IO operation is as following:
1114 *
 1115 *     - once parameters for IO are set up in cl_io, cl_io_operations::cio_lock()
 1116 *       is called on each layer. The responsibility of this method is to add
 1117 *       the locks needed by a given layer into cl_io::ci_lockset.
1118 *
1119 *     - once locks for all layers were collected, they are sorted to avoid
1120 *       dead-locks (cl_io_locks_sort()), and enqueued.
1121 *
1122 *     - when all locks are acquired, IO is performed;
1123 *
1124 *     - locks are released after IO is complete.
1125 *
1126 * Striping introduces major additional complexity into locking. The
1127 * fundamental problem is that it is generally unsafe to actively use (hold)
 1128 * two locks on different OST servers at the same time, as this introduces
1129 * inter-server dependency and can lead to cascading evictions.
1130 *
1131 * Basic solution is to sub-divide large read/write IOs into smaller pieces so
1132 * that no multi-stripe locks are taken (note that this design abandons POSIX
1133 * read/write semantics). Such pieces ideally can be executed concurrently. At
 1134 * the same time, certain types of IO cannot be sub-divided without
1135 * sacrificing correctness. This includes:
1136 *
1137 *  - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
1138 *  atomicity;
1139 *
1140 *  - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
1141 *
1142 * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
1143 * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
1144 * has to be held together with the usual lock on [offset, offset + count].
1145 *
1146 * Interaction with DLM
1147 *
1148 * In the expected setup, cl_lock is ultimately backed up by a collection of
1149 * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
1150 * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
1151 * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
1152 * description of interaction with DLM.
1153 */
1154
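/*
 * A hedged life-cycle sketch, assuming the cl_lock_request()/cl_lock_release()
 * helpers declared later in this header: a lock is requested for the duration
 * of one IO and released when that IO is complete:
 *
 *      struct cl_lock lock = {
 *              .cll_descr = {
 *                      .cld_obj       = obj,
 *                      .cld_start     = start_index,
 *                      .cld_end       = end_index,
 *                      .cld_mode      = CLM_WRITE,
 *                      .cld_enq_flags = 0,
 *              },
 *      };
 *
 *      rc = cl_lock_request(env, io, &lock);
 *      if (rc == 0) {
 *              ... perform the IO covered by [start_index, end_index] ...
 *              cl_lock_release(env, &lock);
 *      }
 *
 * In real code the cl_lock is usually embedded in a longer-lived per-IO
 * structure rather than declared on the stack.
 */
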
1155/**
1156 * Lock description.
1157 */
1158struct cl_lock_descr {
1159        /** Object this lock is granted for. */
1160        struct cl_object *cld_obj;
1161        /** Index of the first page protected by this lock. */
1162        pgoff_t    cld_start;
1163        /** Index of the last page (inclusive) protected by this lock. */
1164        pgoff_t    cld_end;
1165        /** Group ID, for group lock */
1166        __u64        cld_gid;
1167        /** Lock mode. */
1168        enum cl_lock_mode cld_mode;
1169        /**
1170         * flags to enqueue lock. A combination of bit-flags from
1171         * enum cl_enq_flags.
1172         */
1173        __u32        cld_enq_flags;
1174};
1175
1176#define DDESCR "%s(%d):[%lu, %lu]:%x"
1177#define PDESCR(descr)                                              \
1178        cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode,        \
1179        (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
1180
1181const char *cl_lock_mode_name(const enum cl_lock_mode mode);
1182
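/*
 * DDESCR/PDESCR are meant to be used together when logging a lock
 * description; for a struct cl_lock_descr pointer descr, e.g.:
 *
 *      CDEBUG(D_DLMTRACE, "enqueuing " DDESCR "\n", PDESCR(descr));
 */
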
1183/**
1184 * Layered client lock.
1185 */
1186struct cl_lock {
1187        /** List of slices. Immutable after creation. */
1188        struct list_head            cll_layers;
1189        /** lock attribute, extent, cl_object, etc. */
1190        struct cl_lock_descr  cll_descr;
1191};
1192
1193/**
1194 * Per-layer part of cl_lock
1195 *
1196 * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
1197 */
1198struct cl_lock_slice {
1199        struct cl_lock            *cls_lock;
1200        /** Object slice corresponding to this lock slice. Immutable after
1201         * creation.
1202         */
1203        struct cl_object                *cls_obj;
1204        const struct cl_lock_operations *cls_ops;
1205        /** Linkage into cl_lock::cll_layers. Immutable after creation. */
1206        struct list_head                       cls_linkage;
1207};
1208
1209/**
1210 *
1211 * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
1212 */
1213struct cl_lock_operations {
1214        /** @{ */
1215        /**
1216         * Attempts to enqueue the lock. Called top-to-bottom.
1217         *
1218         * \retval 0    this layer has enqueued the lock successfully
1219         * \retval >0   this layer has enqueued the lock, but need to wait on
1220         *              @anchor for resources
1221         * \retval -ve  failure
1222         *
1223         * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
1224         * \see osc_lock_enqueue()
1225         */
1226        int  (*clo_enqueue)(const struct lu_env *env,
1227                            const struct cl_lock_slice *slice,
1228                            struct cl_io *io, struct cl_sync_io *anchor);
1229        /**
 1230         * Cancel a lock and release its DLM lock ref, but do not cancel the
 1231         * DLM lock itself.
1232         */
1233        void (*clo_cancel)(const struct lu_env *env,
1234                           const struct cl_lock_slice *slice);
1235        /** @} */
1236        /**
1237         * Destructor. Frees resources and the slice.
1238         *
1239         * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
1240         * \see osc_lock_fini()
1241         */
1242        void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
1243        /**
1244         * Optional debugging helper. Prints given lock slice.
1245         */
1246        int (*clo_print)(const struct lu_env *env,
1247                         void *cookie, lu_printer_t p,
1248                         const struct cl_lock_slice *slice);
1249};
1250
1251#define CL_LOCK_DEBUG(mask, env, lock, format, ...)                  \
1252do {                                                                \
1253        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                \
1254                                                                        \
1255        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {              \
1256                cl_lock_print(env, &msgdata, lu_cdebug_printer, lock);  \
1257                CDEBUG(mask, format, ## __VA_ARGS__);             \
1258        }                                                              \
1259} while (0)
1260
1261#define CL_LOCK_ASSERT(expr, env, lock) do {                        \
1262        if (likely(expr))                                              \
1263                break;                                            \
1264                                                                        \
1265        CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr);    \
1266        LBUG();                                                  \
1267} while (0)
1268
1269/** @} cl_lock */
1270
1271/** \addtogroup cl_page_list cl_page_list
1272 * Page list used to perform collective operations on a group of pages.
1273 *
1274 * Pages are added to the list one by one. cl_page_list acquires a reference
1275 * for every page in it. Page list is used to perform collective operations on
1276 * pages:
1277 *
1278 *     - submit pages for an immediate transfer,
1279 *
1280 *     - own pages on behalf of certain io (waiting for each page in turn),
1281 *
1282 *     - discard pages.
1283 *
1284 * When list is finalized, it releases references on all pages it still has.
1285 *
1286 * \todo XXX concurrency control.
1287 *
1288 * @{
1289 */
1290struct cl_page_list {
1291        unsigned int             pl_nr;
1292        struct list_head           pl_pages;
1293        struct task_struct      *pl_owner;
1294};
1295
1296/**
 1297 * A 2-queue of pages. A convenience data-type for the common use case: a
 1298 * 2-queue contains an incoming page list and an outgoing page list.
1299 */
1300struct cl_2queue {
1301        struct cl_page_list c2_qin;
1302        struct cl_page_list c2_qout;
1303};
1304
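/*
 * A hedged usage sketch, assuming the cl_2queue_*()/cl_page_list_*() helpers
 * and cl_io_submit_sync() declared later in this header, with io->ci_queue
 * being the per-IO 2-queue: a typical immediate transfer builds the incoming
 * queue, submits it synchronously, and then finalizes both queues:
 *
 *      struct cl_2queue *queue = &io->ci_queue;
 *
 *      cl_2queue_init(queue);
 *      cl_page_list_add(&queue->c2_qin, page);
 *      rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *      cl_2queue_discard(env, io, queue);
 *      cl_2queue_disown(env, io, queue);
 *      cl_2queue_fini(env, queue);
 */
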
1305/** @} cl_page_list */
1306
1307/** \addtogroup cl_io cl_io
1308 * @{
1309 */
1310/** \struct cl_io
1311 * I/O
1312 *
1313 * cl_io represents a high level I/O activity like
1314 * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
1315 * lock.
1316 *
1317 * cl_io is a layered object, much like cl_{object,page,lock} but with one
1318 * important distinction. We want to minimize number of calls to the allocator
1319 * in the fast path, e.g., in the case of read(2) when everything is cached:
1320 * client already owns the lock over region being read, and data are cached
1321 * due to read-ahead. To avoid allocation of cl_io layers in such situations,
1322 * per-layer io state is stored in the session, associated with the io, see
1323 * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
1324 * by using free-lists, see cl_env_get().
1325 *
1326 * There is a small predefined number of possible io types, enumerated in enum
1327 * cl_io_type.
1328 *
 1329 * cl_io is a state machine that can be advanced concurrently by multiple
1330 * threads. It is up to these threads to control the concurrency and,
1331 * specifically, to detect when io is done, and its state can be safely
1332 * released.
1333 *
1334 * For read/write io overall execution plan is as following:
1335 *
1336 *     (0) initialize io state through all layers;
1337 *
1338 *     (1) loop: prepare chunk of work to do
1339 *
1340 *     (2) call all layers to collect locks they need to process current chunk
1341 *
1342 *     (3) sort all locks to avoid dead-locks, and acquire them
1343 *
1344 *     (4) process the chunk: call per-page methods
 1345 *       (cl_io_operations::cio_prepare_write() and
 1346 *       cl_io_operations::cio_commit_write() for write)
1347 *
1348 *     (5) release locks
1349 *
1350 *     (6) repeat loop.
1351 *
 1352 * To implement the "parallel IO mode", the lov layer creates sub-ios (lazily,
 1353 * to address the allocation efficiency issues mentioned above), and returns a
 1354 * special error condition from the per-page method when the current sub-io has
 1355 * to block. This causes the io loop to be repeated, and lov switches to the next
1356 * sub-io in its cl_io_operations::cio_iter_init() implementation.
1357 */
1358
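/*
 * Schematically, one iteration of the io loop above maps onto the per-layer
 * methods of struct cl_io_operations (declared below) through the cl_io_*()
 * entry points near the end of this header. This is only an illustrative
 * sketch of what cl_io_loop() does, with error handling omitted, not its
 * actual implementation:
 *
 *        do {
 *                cl_io_iter_init(env, io);
 *                cl_io_lock(env, io);
 *                cl_io_start(env, io);
 *                cl_io_end(env, io);
 *                cl_io_unlock(env, io);
 *                cl_io_iter_fini(env, io);
 *        } while (io->ci_continue);
 */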
1359/** IO types */
1360enum cl_io_type {
1361        /** read system call */
1362        CIT_READ = 1,
1363        /** write system call */
1364        CIT_WRITE,
1365        /** truncate, utime system calls */
1366        CIT_SETATTR,
1367        /** get data version */
1368        CIT_DATA_VERSION,
1369        /**
1370         * page fault handling
1371         */
1372        CIT_FAULT,
1373        /**
1374         * fsync system call handling,
1375         * to write out a range of a file
1376         */
1377        CIT_FSYNC,
1378        /**
1379         * Miscellaneous io. This is used for occasional io activity that
1380         * doesn't fit into other types. Currently this is used for:
1381         *
1382         *     - cancellation of an extent lock. This io exists as a context
1383         *     to write dirty pages from under the lock being canceled back
1384         *     to the server;
1385         *
1386         *     - VM induced page write-out. An io context for writing a page
1387         *     out for memory cleansing;
1388         *
1389         *     - glimpse. An io context to acquire glimpse lock.
1390         *
1391         *     - grouplock. An io context to acquire group lock.
1392         *
1393         * CIT_MISC io is used simply as a context in which locks and pages
1394         * are manipulated. Such io has no internal "process", that is,
1395         * cl_io_loop() is never called for it.
1396         */
1397        CIT_MISC,
1398        CIT_OP_NR
1399};
1400
1401/**
1402 * States of cl_io state machine
1403 */
1404enum cl_io_state {
1405        /** Not initialized. */
1406        CIS_ZERO,
1407        /** Initialized. */
1408        CIS_INIT,
1409        /** IO iteration started. */
1410        CIS_IT_STARTED,
1411        /** Locks taken. */
1412        CIS_LOCKED,
1413        /** Actual IO is in progress. */
1414        CIS_IO_GOING,
1415        /** IO for the current iteration finished. */
1416        CIS_IO_FINISHED,
1417        /** Locks released. */
1418        CIS_UNLOCKED,
1419        /** Iteration completed. */
1420        CIS_IT_ENDED,
1421        /** cl_io finalized. */
1422        CIS_FINI
1423};
1424
1425/**
1426 * IO state private for a layer.
1427 *
1428 * This is usually embedded into layer session data, rather than allocated
1429 * dynamically.
1430 *
1431 * \see vvp_io, lov_io, osc_io
1432 */
1433struct cl_io_slice {
1434        struct cl_io              *cis_io;
1435        /** corresponding object slice. Immutable after creation. */
1436        struct cl_object              *cis_obj;
1437        /** io operations. Immutable after creation. */
1438        const struct cl_io_operations *cis_iop;
1439        /**
1440         * linkage into a list of all slices for a given cl_io, hanging off
1441         * cl_io::ci_layers. Immutable after creation.
1442         */
1443        struct list_head                     cis_linkage;
1444};
1445
1446typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
1447                              struct cl_page *);
1448
1449struct cl_read_ahead {
1450        /*
1451         * Maximum page index at which the readahead window will end.
1452         * This is determined by DLM lock coverage, RPC size and stripe boundary.
1453         * cra_end is inclusive.
1454         */
1455        pgoff_t cra_end;
1456        /* optimal RPC size for this read, by pages */
1457        unsigned long cra_rpc_size;
1458        /*
1459         * Release callback. If readahead holds resources underneath, this
1460         * function should be called to release them.
1461         */
1462        void (*cra_release)(const struct lu_env *env, void *cbdata);
1463        /* Callback data for cra_release routine */
1464        void *cra_cbdata;
1465};
1466
1467static inline void cl_read_ahead_release(const struct lu_env *env,
1468                                         struct cl_read_ahead *ra)
1469{
1470        if (ra->cra_release)
1471                ra->cra_release(env, ra->cra_cbdata);
1472        memset(ra, 0, sizeof(*ra));
1473}
1474
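/*
 * For illustration, a layer's cl_io_operations::cio_read_ahead() method
 * (declared below) might fill in struct cl_read_ahead like this; all "foo_"
 * names and constants are hypothetical:
 *
 *        static int foo_io_read_ahead(const struct lu_env *env,
 *                                     const struct cl_io_slice *ios,
 *                                     pgoff_t start, struct cl_read_ahead *ra)
 *        {
 *                ra->cra_end      = start + FOO_RA_PAGES - 1;
 *                ra->cra_rpc_size = FOO_PAGES_PER_RPC;
 *                ra->cra_release  = foo_read_ahead_release;
 *                ra->cra_cbdata   = foo_cbdata;
 *                return 0;
 *        }
 *
 * Whoever consumes the result calls cl_read_ahead_release(), which invokes
 * cra_release() (if set) and clears the structure for reuse.
 */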
1475/**
1476 * Per-layer io operations.
1477 * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
1478 */
1479struct cl_io_operations {
1480        /**
1481         * Vector of io state transition methods for every io type.
1482         *
1483         * \see cl_page_operations::io
1484         */
1485        struct {
1486                /**
1487                 * Prepare io iteration at a given layer.
1488                 *
1489                 * Called top-to-bottom at the beginning of each iteration of
1490                 * "io loop" (if it makes sense for this type of io). Here
1491                 * layer selects what work it will do during this iteration.
1492                 *
1493                 * \see cl_io_operations::cio_iter_fini()
1494                 */
1495                int (*cio_iter_init)(const struct lu_env *env,
1496                                     const struct cl_io_slice *slice);
1497                /**
1498                 * Finalize io iteration.
1499                 *
1500                 * Called bottom-to-top at the end of each iteration of "io
1501                 * loop". Here layers can decide whether IO has to be
1502                 * continued.
1503                 *
1504                 * \see cl_io_operations::cio_iter_init()
1505                 */
1506                void (*cio_iter_fini)(const struct lu_env *env,
1507                                      const struct cl_io_slice *slice);
1508                /**
1509                 * Collect locks for the current iteration of io.
1510                 *
1511                 * Called top-to-bottom to collect all locks necessary for
1512                 * this iteration. This method shouldn't actually enqueue
1513                 * anything; instead it should post a lock through
1514                 * cl_io_lock_add(). Once all locks are collected, they are
1515                 * sorted and enqueued in the proper order.
1516                 */
1517                int  (*cio_lock)(const struct lu_env *env,
1518                                 const struct cl_io_slice *slice);
1519                /**
1520                 * Finalize unlocking.
1521                 *
1522                 * Called bottom-to-top to finish layer specific unlocking
1523                 * functionality, after generic code released all locks
1524                 * acquired by cl_io_operations::cio_lock().
1525                 */
1526                void  (*cio_unlock)(const struct lu_env *env,
1527                                    const struct cl_io_slice *slice);
1528                /**
1529                 * Start io iteration.
1530                 *
1531                 * Once all locks are acquired, called top-to-bottom to
1532                 * commence actual IO. In the current implementation,
1533                 * top-level vvp_io_{read,write}_start() does all the work
1534                 * synchronously by calling generic_file_*(), so other layers
1535                 * are called when everything is done.
1536                 */
1537                int  (*cio_start)(const struct lu_env *env,
1538                                  const struct cl_io_slice *slice);
1539                /**
1540                 * Called top-to-bottom at the end of io loop. Here layer
1541                 * might wait for an unfinished asynchronous io.
1542                 */
1543                void (*cio_end)(const struct lu_env *env,
1544                                const struct cl_io_slice *slice);
1545                /**
1546                 * Called bottom-to-top to notify layers that read/write IO
1547                 * iteration finished, with \a nob bytes transferred.
1548                 */
1549                void (*cio_advance)(const struct lu_env *env,
1550                                    const struct cl_io_slice *slice,
1551                                    size_t nob);
1552                /**
1553                 * Called once per io, bottom-to-top to release io resources.
1554                 */
1555                void (*cio_fini)(const struct lu_env *env,
1556                                 const struct cl_io_slice *slice);
1557        } op[CIT_OP_NR];
1558
1559        /**
1560         * Submit pages from \a queue->c2_qin for IO, and move
1561         * successfully submitted pages into \a queue->c2_qout. Return
1562         * non-zero if it failed to submit even a single page. If
1563         * submission failed after some pages were moved into \a
1564         * queue->c2_qout, the completion callback with non-zero ioret is
1565         * executed on them.
1566         */
1567        int  (*cio_submit)(const struct lu_env *env,
1568                           const struct cl_io_slice *slice,
1569                           enum cl_req_type crt,
1570                           struct cl_2queue *queue);
1571        /**
1572         * Queue async page for write.
1573         * The difference between cio_submit and cio_commit_async is that
1574         * cio_submit is for urgent requests.
1575         */
1576        int  (*cio_commit_async)(const struct lu_env *env,
1577                                 const struct cl_io_slice *slice,
1578                                 struct cl_page_list *queue, int from, int to,
1579                                 cl_commit_cbt cb);
1580        /**
1581         * Decide maximum read ahead extent
1582         *
1583         * \pre io->ci_type == CIT_READ
1584         */
1585        int (*cio_read_ahead)(const struct lu_env *env,
1586                              const struct cl_io_slice *slice,
1587                              pgoff_t start, struct cl_read_ahead *ra);
1588        /**
1589         * Optional debugging helper. Print given io slice.
1590         */
1591        int (*cio_print)(const struct lu_env *env, void *cookie,
1592                         lu_printer_t p, const struct cl_io_slice *slice);
1593};
1594
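/*
 * A layer typically keeps its io state (embedding a cl_io_slice) in the
 * per-thread session and attaches it, together with an operations vector,
 * when the io is initialized. A minimal, purely illustrative sketch; the
 * "foo_" structure, helper and methods are hypothetical:
 *
 *        static const struct cl_io_operations foo_io_ops = {
 *                .op = {
 *                        [CIT_READ] = {
 *                                .cio_start = foo_io_read_start,
 *                                .cio_fini  = foo_io_fini,
 *                        },
 *                },
 *        };
 *
 *        static int foo_io_init(const struct lu_env *env,
 *                               struct cl_object *obj, struct cl_io *io)
 *        {
 *                struct foo_io *fio = foo_env_io(env);
 *
 *                cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *                return 0;
 *        }
 *
 * \see vvp_io_init(), lov_io_init() and osc_io_init() for real examples.
 */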
1595/**
1596 * Flags to lock enqueue procedure.
1597 * \ingroup cl_lock
1598 */
1599enum cl_enq_flags {
1600        /**
1601         * instruct the server not to block if a conflicting lock is found;
1602         * instead, -EWOULDBLOCK is returned immediately.
1603         */
1604        CEF_NONBLOCK     = 0x00000001,
1605        /**
1606         * take lock asynchronously (out of order), as it cannot
1607         * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
1608         */
1609        CEF_ASYNC       = 0x00000002,
1610        /**
1611         * tell the server to instruct (through a flag in the blocking ast) the
1612         * owner of the conflicting lock that it can drop dirty pages
1613         * protected by this lock, without sending them to the server.
1614         */
1615        CEF_DISCARD_DATA = 0x00000004,
1616        /**
1617         * tell the sub layers that it must be a `real' lock. This is used for
1618         * mmapped-buffer locks and glimpse locks that must never be converted
1619         * into lockless mode.
1620         *
1621         * \see vvp_mmap_locks(), cl_glimpse_lock().
1622         */
1623        CEF_MUST         = 0x00000008,
1624        /**
1625         * tell the sub layers to never request a `real' lock. This flag is
1626         * not used currently.
1627         *
1628         * cl_io::ci_lockreq and the CEF_{MUST,NEVER} flags specify the lockless
1629         * conversion policy: ci_lockreq describes the generic lock requirement
1630         * for this IO, especially for locks which belong to the object doing
1631         * IO; however, the lock itself may have precise requirements that are
1632         * described by the enqueue flags.
1633         */
1634        CEF_NEVER       = 0x00000010,
1635        /**
1636         * for async glimpse lock.
1637         */
1638        CEF_AGL   = 0x00000020,
1639        /**
1640         * enqueue a lock to test DLM lock existence.
1641         */
1642        CEF_PEEK        = 0x00000040,
1643        /**
1644         * Lock match only. Used by group lock in I/O as group lock
1645         * is known to exist.
1646         */
1647        CEF_LOCK_MATCH  = BIT(7),
1648        /**
1649         * mask of enq_flags.
1650         */
1651        CEF_MASK        = 0x000000ff,
1652};
1653
1654/**
1655 * Link between lock and io. Intermediate structure is needed, because the
1656 * same lock can be part of multiple io's simultaneously.
1657 */
1658struct cl_io_lock_link {
1659        /** linkage into one of cl_lockset lists. */
1660        struct list_head           cill_linkage;
1661        struct cl_lock          cill_lock;
1662        /** optional destructor */
1663        void           (*cill_fini)(const struct lu_env *env,
1664                                    struct cl_io_lock_link *link);
1665};
1666#define cill_descr      cill_lock.cll_descr
1667
1668/**
1669 * Lock-set represents a collection of locks that an io needs at a
1670 * time. Generally speaking, the client tries to avoid holding multiple locks
1671 * when possible, because
1672 *
1673 *      - holding extent locks over multiple ost's introduces the danger of
1674 *      "cascading timeouts";
1675 *
1676 *      - holding multiple locks over the same ost is still dead-lock prone,
1677 *      see comment in osc_lock_enqueue(),
1678 *
1679 * but there are certain situations where this is unavoidable:
1680 *
1681 *      - O_APPEND writes have to take [0, EOF] lock for correctness;
1682 *
1683 *      - truncate has to take [new-size, EOF] lock for correctness;
1684 *
1685 *      - SNS has to take locks across full stripe for correctness;
1686 *
1687 *      - in the case when a user-level buffer, supplied to {read,write}(file0),
1688 *      is part of a memory-mapped lustre file, the client has to take dlm
1689 *      locks on file0 and on all files that back the buffer (or the part of
1690 *      the buffer that is being processed in the current chunk); in any
1691 *      case, there are situations where at least 2 locks are necessary.
1692 *
1693 * In such cases we at least try to take locks in the same consistent
1694 * order. To this end, all locks are first collected, then sorted, and then
1695 * enqueued.
1696 */
1697struct cl_lockset {
1698        /** locks to be acquired. */
1699        struct list_head  cls_todo;
1700        /** locks acquired. */
1701        struct list_head  cls_done;
1702};
1703
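/*
 * Locks are added to the set by cl_io_operations::cio_lock() methods,
 * normally through cl_io_lock_add() or cl_io_lock_alloc_add() (declared near
 * the end of this header). An illustrative sketch, assuming the lock
 * descriptor fields and modes from the cl_lock section of this header and a
 * hypothetical per-thread foo_env_info() buffer:
 *
 *        static int foo_io_rw_lock(const struct lu_env *env,
 *                                  const struct cl_io_slice *ios)
 *        {
 *                struct cl_lock_descr *descr = &foo_env_info(env)->fti_descr;
 *                struct cl_io *io = ios->cis_io;
 *
 *                descr->cld_obj   = ios->cis_obj;
 *                descr->cld_mode  = io->ci_type == CIT_READ ?
 *                                   CLM_READ : CLM_WRITE;
 *                descr->cld_start = cl_index(ios->cis_obj, io->u.ci_rw.crw_pos);
 *                descr->cld_end   = cl_index(ios->cis_obj, io->u.ci_rw.crw_pos +
 *                                            io->u.ci_rw.crw_count - 1);
 *                return cl_io_lock_alloc_add(env, io, descr);
 *        }
 *
 * The generic code then sorts the locks on cls_todo, enqueues them and moves
 * them to cls_done as they are granted.
 */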
1704/**
1705 * Lock requirements (demand) for IO. It should be cl_io_lock_req,
1706 * but 'req' is always thought of as 'request' :-)
1707 */
1708enum cl_io_lock_dmd {
1709        /** Always lock data (e.g., O_APPEND). */
1710        CILR_MANDATORY = 0,
1711        /** Layers are free to decide between local and global locking. */
1712        CILR_MAYBE,
1713        /** Never lock: there is no cache (e.g., lockless IO). */
1714        CILR_NEVER
1715};
1716
1717enum cl_fsync_mode {
1718        /** start writeback of dirty pages, do not wait for them to finish */
1719        CL_FSYNC_NONE  = 0,
1720        /** start writeback of dirty pages and wait for them to finish */
1721        CL_FSYNC_LOCAL = 1,
1722        /** discard all of dirty pages in a specific file range */
1723        CL_FSYNC_DISCARD = 2,
1724        /** start writeback and make sure the pages have reached storage before
1725         * returning. An OST_SYNC RPC must be issued and finished
1726         */
1727        CL_FSYNC_ALL   = 3
1728};
1729
1730struct cl_io_rw_common {
1731        loff_t      crw_pos;
1732        size_t      crw_count;
1733        int      crw_nonblock;
1734};
1735
1736/**
1737 * State for io.
1738 *
1739 * cl_io is shared by all threads participating in this IO (in the current
1740 * implementation only one thread advances an IO, but the parallel IO design
1741 * and concurrent copy_*_user() require multiple threads acting on the same
1742 * IO). It is up to these threads to serialize their activities, including
1743 * updates to mutable cl_io fields.
1744 */
1745struct cl_io {
1746        /** type of this IO. Immutable after creation. */
1747        enum cl_io_type         ci_type;
1748        /** current state of cl_io state machine. */
1749        enum cl_io_state               ci_state;
1750        /** main object this io is against. Immutable after creation. */
1751        struct cl_object              *ci_obj;
1752        /**
1753         * Upper layer io, of which this io is a part of. Immutable after
1754         * creation.
1755         */
1756        struct cl_io              *ci_parent;
1757        /** List of slices. Immutable after creation. */
1758        struct list_head                     ci_layers;
1759        /** list of locks (to be) acquired by this io. */
1760        struct cl_lockset             ci_lockset;
1761        /** lock requirements; this is just helper info for sublayers. */
1762        enum cl_io_lock_dmd         ci_lockreq;
1763        union {
1764                struct cl_rd_io {
1765                        struct cl_io_rw_common rd;
1766                } ci_rd;
1767                struct cl_wr_io {
1768                        struct cl_io_rw_common wr;
1769                        int                 wr_append;
1770                        int                 wr_sync;
1771                } ci_wr;
1772                struct cl_io_rw_common ci_rw;
1773                struct cl_setattr_io {
1774                        struct ost_lvb   sa_attr;
1775                        unsigned int             sa_attr_flags;
1776                        unsigned int     sa_valid;
1777                        int             sa_stripe_index;
1778                        const struct lu_fid     *sa_parent_fid;
1779                } ci_setattr;
1780                struct cl_data_version_io {
1781                        u64 dv_data_version;
1782                        int dv_flags;
1783                } ci_data_version;
1784                struct cl_fault_io {
1785                        /** page index within file. */
1786                        pgoff_t  ft_index;
1787                        /** number of valid bytes on a faulted page. */
1788                        size_t       ft_nob;
1789                        /** writable page? for nopage() only */
1790                        int          ft_writable;
1791                        /** page of an executable? */
1792                        int          ft_executable;
1793                        /** page_mkwrite() */
1794                        int          ft_mkwrite;
1795                        /** resulting page */
1796                        struct cl_page *ft_page;
1797                } ci_fault;
1798                struct cl_fsync_io {
1799                        loff_t       fi_start;
1800                        loff_t       fi_end;
1801                        /** file system level fid */
1802                        struct lu_fid     *fi_fid;
1803                        enum cl_fsync_mode fi_mode;
1804                        /* how many pages were written/discarded */
1805                        unsigned int       fi_nr_written;
1806                } ci_fsync;
1807        } u;
1808        struct cl_2queue     ci_queue;
1809        size_t         ci_nob;
1810        int               ci_result;
1811        unsigned int     ci_continue:1,
1812        /**
1813         * This io holds a grouplock, to inform sublayers that they should
1814         * not do lockless i/o.
1815         */
1816                             ci_no_srvlock:1,
1817        /**
1818         * The whole IO needs to be restarted because the layout has changed
1819         */
1820                             ci_need_restart:1,
1821        /**
1822         * do not refresh the layout - the IO issuer knows that the layout won't
1823         * change (page operations; a layout change causes all pages to be
1824         * discarded), or it doesn't matter if it changes (sync).
1825         */
1826                             ci_ignore_layout:1,
1827        /**
1828         * Check if the layout changed after the IO finishes. Mainly for an HSM
1829         * requirement. If IO occurs to open files, it doesn't need to
1830         * verify the layout because HSM won't release open files.
1831         * Right now, only two operations need to verify layout: glimpse
1832         * and setattr.
1833         */
1834                             ci_verify_layout:1,
1835        /**
1836         * file is released, restore has to be triggered by the vvp layer
1837         */
1838                             ci_restore_needed:1,
1839        /**
1840         * O_NOATIME
1841         */
1842                             ci_noatime:1;
1843        /**
1844         * Number of pages owned by this IO. For invariant checking.
1845         */
1846        unsigned int         ci_owned_nr;
1847};
1848
1849/** @} cl_io */
1850
1851/**
1852 * Per-transfer attributes.
1853 */
1854struct cl_req_attr {
1855        enum cl_req_type cra_type;
1856        u64              cra_flags;
1857        struct cl_page  *cra_page;
1858
1859        /** Generic attributes for server consumption. */
1860        struct obdo     *cra_oa;
1861        /** Jobid */
1862        char             cra_jobid[LUSTRE_JOBID_SIZE];
1863};
1864
1865enum cache_stats_item {
1866        /** how many cache lookups were performed */
1867        CS_lookup = 0,
1868        /** how many times cache lookup resulted in a hit */
1869        CS_hit,
1870        /** how many entities are in the cache right now */
1871        CS_total,
1872        /** how many entities in the cache are actively used (and cannot be
1873         * evicted) right now
1874         */
1875        CS_busy,
1876        /** how many entities were created at all */
1877        CS_create,
1878        CS_NR
1879};
1880
1881#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
1882
1883/**
1884 * Stats for a generic cache (similar to inode, lu_object, etc. caches).
1885 */
1886struct cache_stats {
1887        const char    *cs_name;
1888        atomic_t   cs_stats[CS_NR];
1889};
1890
1891/** These are not exported so far */
1892void cache_stats_init(struct cache_stats *cs, const char *name);
1893
1894/**
1895 * Client-side site. This represents a particular client stack. "Global"
1896 * variables should (directly or indirectly) be added here to allow multiple
1897 * clients to co-exist in a single address space.
1898 */
1899struct cl_site {
1900        struct lu_site  cs_lu;
1901        /**
1902         * Statistical counters. Atomics do not scale; something better, like
1903         * per-cpu counters, is needed.
1904         *
1905         * These are exported as /sys/kernel/debug/lustre/llite/.../site
1906         *
1907         * When interpreting, keep in mind that both sub-locks (and sub-pages)
1908         * and top-locks (and top-pages) are accounted here.
1909         */
1910        struct cache_stats    cs_pages;
1911        atomic_t          cs_pages_state[CPS_NR];
1912};
1913
1914int  cl_site_init(struct cl_site *s, struct cl_device *top);
1915void cl_site_fini(struct cl_site *s);
1916void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
1917
1918/**
1919 * Output client site statistical counters into a buffer. Suitable for
1920 * ll_rd_*()-style functions.
1921 */
1922int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
1923
1924/**
1925 * \name helpers
1926 *
1927 * Type conversion and accessory functions.
1928 */
1929/** @{ */
1930
1931static inline struct cl_site *lu2cl_site(const struct lu_site *site)
1932{
1933        return container_of(site, struct cl_site, cs_lu);
1934}
1935
1936static inline int lu_device_is_cl(const struct lu_device *d)
1937{
1938        return d->ld_type->ldt_tags & LU_DEVICE_CL;
1939}
1940
1941static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
1942{
1943        LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
1944        return container_of0(d, struct cl_device, cd_lu_dev);
1945}
1946
1947static inline struct lu_device *cl2lu_dev(struct cl_device *d)
1948{
1949        return &d->cd_lu_dev;
1950}
1951
1952static inline struct cl_object *lu2cl(const struct lu_object *o)
1953{
1954        LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
1955        return container_of0(o, struct cl_object, co_lu);
1956}
1957
1958static inline const struct cl_object_conf *
1959lu2cl_conf(const struct lu_object_conf *conf)
1960{
1961        return container_of0(conf, struct cl_object_conf, coc_lu);
1962}
1963
1964static inline struct cl_object *cl_object_next(const struct cl_object *obj)
1965{
1966        return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
1967}
1968
1969static inline struct cl_device *cl_object_device(const struct cl_object *o)
1970{
1971        LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
1972        return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
1973}
1974
1975static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
1976{
1977        return container_of0(h, struct cl_object_header, coh_lu);
1978}
1979
1980static inline struct cl_site *cl_object_site(const struct cl_object *obj)
1981{
1982        return lu2cl_site(obj->co_lu.lo_dev->ld_site);
1983}
1984
1985static inline
1986struct cl_object_header *cl_object_header(const struct cl_object *obj)
1987{
1988        return luh2coh(obj->co_lu.lo_header);
1989}
1990
1991static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
1992{
1993        return lu_device_init(&d->cd_lu_dev, t);
1994}
1995
1996static inline void cl_device_fini(struct cl_device *d)
1997{
1998        lu_device_fini(&d->cd_lu_dev);
1999}
2000
2001void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
2002                       struct cl_object *obj, pgoff_t index,
2003                       const struct cl_page_operations *ops);
2004void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
2005                       struct cl_object *obj,
2006                       const struct cl_lock_operations *ops);
2007void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
2008                     struct cl_object *obj, const struct cl_io_operations *ops);
2009/** @} helpers */
2010
2011/** \defgroup cl_object cl_object
2012 * @{
2013 */
2014struct cl_object *cl_object_top(struct cl_object *o);
2015struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
2016                                 const struct lu_fid *fid,
2017                                 const struct cl_object_conf *c);
2018
2019int  cl_object_header_init(struct cl_object_header *h);
2020void cl_object_put(const struct lu_env *env, struct cl_object *o);
2021void cl_object_get(struct cl_object *o);
2022void cl_object_attr_lock(struct cl_object *o);
2023void cl_object_attr_unlock(struct cl_object *o);
2024int  cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
2025                        struct cl_attr *attr);
2026int  cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
2027                           const struct cl_attr *attr, unsigned int valid);
2028int  cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
2029                       struct ost_lvb *lvb);
2030int  cl_conf_set(const struct lu_env *env, struct cl_object *obj,
2031                 const struct cl_object_conf *conf);
2032int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
2033void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
2034int  cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
2035                         struct lov_user_md __user *lum);
2036int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
2037                     struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
2038                     size_t *buflen);
2039int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
2040                         struct cl_layout *cl);
2041loff_t cl_object_maxbytes(struct cl_object *obj);
2042
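/*
 * The attribute accessors above must be called with the object's attribute
 * lock held. A minimal usage sketch (the struct cl_attr pointer is assumed
 * to be supplied by the caller, e.g. from its thread environment):
 *
 *        cl_object_attr_lock(obj);
 *        result = cl_object_attr_get(env, obj, attr);
 *        cl_object_attr_unlock(obj);
 */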
2043/**
2044 * Returns true, iff \a o0 and \a o1 are slices of the same object.
2045 */
2046static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
2047{
2048        return cl_object_header(o0) == cl_object_header(o1);
2049}
2050
2051static inline void cl_object_page_init(struct cl_object *clob, int size)
2052{
2053        clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
2054        cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
2055        WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
2056}
2057
2058static inline void *cl_object_page_slice(struct cl_object *clob,
2059                                         struct cl_page *page)
2060{
2061        return (void *)((char *)page + clob->co_slice_off);
2062}
2063
2064/**
2065 * Return refcount of cl_object.
2066 */
2067static inline int cl_object_refc(struct cl_object *clob)
2068{
2069        struct lu_object_header *header = clob->co_lu.lo_header;
2070
2071        return atomic_read(&header->loh_ref);
2072}
2073
2074/** @} cl_object */
2075
2076/** \defgroup cl_page cl_page
2077 * @{
2078 */
2079enum {
2080        CLP_GANG_OKAY = 0,
2081        CLP_GANG_RESCHED,
2082        CLP_GANG_AGAIN,
2083        CLP_GANG_ABORT
2084};
2085
2086/* callback of cl_page_gang_lookup() */
2087struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
2088                             pgoff_t idx, struct page *vmpage,
2089                             enum cl_page_type type);
2090struct cl_page *cl_page_alloc(const struct lu_env *env,
2091                              struct cl_object *o, pgoff_t ind,
2092                              struct page *vmpage,
2093                              enum cl_page_type type);
2094void cl_page_get(struct cl_page *page);
2095void cl_page_put(const struct lu_env *env, struct cl_page *page);
2096void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
2097                   const struct cl_page *pg);
2098void cl_page_header_print(const struct lu_env *env, void *cookie,
2099                          lu_printer_t printer, const struct cl_page *pg);
2100struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
2101
2102const struct cl_page_slice *cl_page_at(const struct cl_page *page,
2103                                       const struct lu_device_type *dtype);
2104
2105/**
2106 * \name ownership
2107 *
2108 * Functions dealing with the ownership of page by io.
2109 */
2110/** @{ */
2111
2112int cl_page_own(const struct lu_env *env,
2113                struct cl_io *io, struct cl_page *page);
2114int cl_page_own_try(const struct lu_env *env,
2115                    struct cl_io *io, struct cl_page *page);
2116void cl_page_assume(const struct lu_env *env,
2117                    struct cl_io *io, struct cl_page *page);
2118void cl_page_unassume(const struct lu_env *env,
2119                      struct cl_io *io, struct cl_page *pg);
2120void cl_page_disown(const struct lu_env *env,
2121                    struct cl_io *io, struct cl_page *page);
2122void cl_page_disown0(const struct lu_env *env,
2123                     struct cl_io *io, struct cl_page *pg);
2124int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
2125
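/*
 * Typical ownership pattern, for illustration: an io takes ownership of a
 * page, operates on it, and then disowns it before dropping its reference:
 *
 *        if (cl_page_own(env, io, page) == 0) {
 *                ...operate on the page on behalf of the io...
 *                cl_page_disown(env, io, page);
 *        }
 *        cl_page_put(env, page);
 */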
2126/** @} ownership */
2127
2128/**
2129 * \name transfer
2130 *
2131 * Functions dealing with the preparation of a page for a transfer, and
2132 * tracking transfer state.
2133 */
2134/** @{ */
2135int cl_page_prep(const struct lu_env *env, struct cl_io *io,
2136                 struct cl_page *pg, enum cl_req_type crt);
2137void cl_page_completion(const struct lu_env *env,
2138                        struct cl_page *pg, enum cl_req_type crt, int ioret);
2139int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
2140                       enum cl_req_type crt);
2141int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
2142                      struct cl_page *pg, enum cl_req_type crt);
2143void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
2144                  int from, int to);
2145int cl_page_cancel(const struct lu_env *env, struct cl_page *page);
2146int cl_page_flush(const struct lu_env *env, struct cl_io *io,
2147                  struct cl_page *pg);
2148
2149/** @} transfer */
2150
2151/**
2152 * \name helper routines
2153 * Functions to discard, delete and export a cl_page.
2154 */
2155/** @{ */
2156void cl_page_discard(const struct lu_env *env, struct cl_io *io,
2157                     struct cl_page *pg);
2158void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
2159int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
2160void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
2161loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
2162pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
2163size_t cl_page_size(const struct cl_object *obj);
2164int cl_pages_prune(const struct lu_env *env, struct cl_object *obj);
2165
2166void cl_lock_print(const struct lu_env *env, void *cookie,
2167                   lu_printer_t printer, const struct cl_lock *lock);
2168void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2169                         lu_printer_t printer,
2170                         const struct cl_lock_descr *descr);
2171/* @} helper */
2172
2173/**
2174 * Data structure managing a client's cached pages. A count of
2175 * "unstable" pages and an LRU of clean pages are maintained.
2176 * "Unstable" pages are pages pinned by the ptlrpc
2177 * layer for recovery purposes.
2178 */
2179struct cl_client_cache {
2180        /**
2181         * Reference count on the client cache:
2182         * # of users (OSCs) + 2 (held by llite and lov)
2183         */
2184        atomic_t                ccc_users;
2185        /**
2186         * # of threads doing shrinking
2187         */
2188        unsigned int            ccc_lru_shrinkers;
2189        /**
2190         * # of LRU entries available
2191         */
2192        atomic_long_t           ccc_lru_left;
2193        /**
2194         * List of entities(OSCs) for this LRU cache
2195         */
2196        struct list_head        ccc_lru;
2197        /**
2198         * Max # of LRU entries
2199         */
2200        unsigned long           ccc_lru_max;
2201        /**
2202         * Lock to protect ccc_lru list
2203         */
2204        spinlock_t              ccc_lru_lock;
2205        /**
2206         * Set if unstable check is enabled
2207         */
2208        unsigned int            ccc_unstable_check:1;
2209        /**
2210         * # of unstable pages for this mount point
2211         */
2212        atomic_long_t           ccc_unstable_nr;
2213        /**
2214         * Waitq for awaiting unstable pages to reach zero.
2215         * Used at umount time and signaled on BRW commit
2216         */
2217        wait_queue_head_t       ccc_unstable_waitq;
2218
2219};
2220
2221/**
2222 * cl_cache functions
2223 */
2224struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
2225void cl_cache_incref(struct cl_client_cache *cache);
2226void cl_cache_decref(struct cl_client_cache *cache);
2227
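/*
 * Lifecycle sketch (illustrative): the cache is created with one reference
 * held by its creator, additional users take references with
 * cl_cache_incref(), and the cache is freed when the last reference is
 * dropped:
 *
 *        cache = cl_cache_init(lru_page_max);
 *        if (!cache)
 *                return -ENOMEM;
 *        cl_cache_incref(cache);
 *        ...
 *        cl_cache_decref(cache);
 *        cl_cache_decref(cache);
 */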
2228/** @} cl_page */
2229
2230/** \defgroup cl_lock cl_lock
2231 * @{
2232 */
2233
2234int cl_lock_request(const struct lu_env *env, struct cl_io *io,
2235                    struct cl_lock *lock);
2236int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
2237                 const struct cl_io *io);
2238void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
2239const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
2240                                       const struct lu_device_type *dtype);
2241void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
2242int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
2243                    struct cl_lock *lock, struct cl_sync_io *anchor);
2244void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
2245
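/*
 * A minimal enqueue/release sketch, for illustration. The lock descriptor
 * (cll_descr: object, extent, mode and enqueue flags) is assumed to have
 * been filled in by the caller; cl_lock_request() enqueues the lock and
 * cl_lock_release() drops it again:
 *
 *        result = cl_lock_request(env, io, lock);
 *        if (result == 0) {
 *                ...the extent is now covered by the lock...
 *                cl_lock_release(env, lock);
 *        }
 */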
2246/** @} cl_lock */
2247
2248/** \defgroup cl_io cl_io
2249 * @{
2250 */
2251
2252int cl_io_init(const struct lu_env *env, struct cl_io *io,
2253               enum cl_io_type iot, struct cl_object *obj);
2254int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
2255                   enum cl_io_type iot, struct cl_object *obj);
2256int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
2257                  enum cl_io_type iot, loff_t pos, size_t count);
2258int cl_io_loop(const struct lu_env *env, struct cl_io *io);
2259
2260void cl_io_fini(const struct lu_env *env, struct cl_io *io);
2261int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
2262void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
2263int cl_io_lock(const struct lu_env *env, struct cl_io *io);
2264void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
2265int cl_io_start(const struct lu_env *env, struct cl_io *io);
2266void cl_io_end(const struct lu_env *env, struct cl_io *io);
2267int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
2268                   struct cl_io_lock_link *link);
2269int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
2270                         struct cl_lock_descr *descr);
2271int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
2272                    enum cl_req_type iot, struct cl_2queue *queue);
2273int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
2274                      enum cl_req_type iot, struct cl_2queue *queue,
2275                      long timeout);
2276int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
2277                       struct cl_page_list *queue, int from, int to,
2278                       cl_commit_cbt cb);
2279int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
2280                     pgoff_t start, struct cl_read_ahead *ra);
2281int cl_io_is_going(const struct lu_env *env);
2282
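/*
 * For illustration, the top-level driver of a read or write io follows
 * roughly the pattern below (error handling trimmed; how the struct cl_io
 * itself is obtained, e.g. from the per-thread session, is caller specific
 * and assumed here):
 *
 *        env = cl_env_get(&refcheck);
 *        if (IS_ERR(env))
 *                return PTR_ERR(env);
 *        io->ci_obj = obj;
 *        if (cl_io_rw_init(env, io, CIT_READ, pos, count) == 0)
 *                result = cl_io_loop(env, io);
 *        else
 *                result = io->ci_result;
 *        cl_io_fini(env, io);
 *        if (result == 0 && io->ci_need_restart)
 *                ...restart the whole io, e.g. after a layout change...
 *        cl_env_put(env, &refcheck);
 */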
2283/**
2284 * True, iff \a io is an O_APPEND write(2).
2285 */
2286static inline int cl_io_is_append(const struct cl_io *io)
2287{
2288        return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
2289}
2290
2291static inline int cl_io_is_sync_write(const struct cl_io *io)
2292{
2293        return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
2294}
2295
2296static inline int cl_io_is_mkwrite(const struct cl_io *io)
2297{
2298        return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
2299}
2300
2301/**
2302 * True, iff \a io is a truncate(2).
2303 */
2304static inline int cl_io_is_trunc(const struct cl_io *io)
2305{
2306        return io->ci_type == CIT_SETATTR &&
2307                (io->u.ci_setattr.sa_valid & ATTR_SIZE);
2308}
2309
2310struct cl_io *cl_io_top(struct cl_io *io);
2311
2312#define CL_IO_SLICE_CLEAN(foo_io, base)                                 \
2313do {                                                                    \
2314        typeof(foo_io) __foo_io = (foo_io);                             \
2315                                                                        \
2316        BUILD_BUG_ON(offsetof(typeof(*__foo_io), base) != 0);           \
2317        memset(&__foo_io->base + 1, 0,                                  \
2318               sizeof(*__foo_io) - sizeof(__foo_io->base));             \
2319} while (0)
2320
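/*
 * CL_IO_SLICE_CLEAN() is intended for per-layer io structures that embed
 * their reusable part as the very first member: it zeroes everything after
 * that member, so state left over from a previous iteration does not leak
 * into the next one. Illustrative use with a hypothetical layer structure,
 * where fio is a pointer to a struct foo_io (the embedded member must be
 * first, as the BUILD_BUG_ON() checks):
 *
 *        struct foo_io {
 *                struct cl_io_slice fio_cl;
 *                int                fio_private_state;
 *        };
 *
 *        CL_IO_SLICE_CLEAN(fio, fio_cl);
 */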
2321/** @} cl_io */
2322
2323/** \defgroup cl_page_list cl_page_list
2324 * @{
2325 */
2326
2327/**
2328 * Last page in the page list.
2329 */
2330static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
2331{
2332        LASSERT(plist->pl_nr > 0);
2333        return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
2334}
2335
2336static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
2337{
2338        LASSERT(plist->pl_nr > 0);
2339        return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
2340}
2341
2342/**
2343 * Iterate over pages in a page list.
2344 */
2345#define cl_page_list_for_each(page, list)                              \
2346        list_for_each_entry((page), &(list)->pl_pages, cp_batch)
2347
2348/**
2349 * Iterate over pages in a page list, taking possible removals into account.
2350 */
2351#define cl_page_list_for_each_safe(page, temp, list)                \
2352        list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
2353
2354void cl_page_list_init(struct cl_page_list *plist);
2355void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
2356void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
2357                       struct cl_page *page);
2358void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
2359                            struct cl_page *page);
2360void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
2361void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
2362                      struct cl_page *page);
2363void cl_page_list_disown(const struct lu_env *env,
2364                         struct cl_io *io, struct cl_page_list *plist);
2365void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
2366
2367void cl_2queue_init(struct cl_2queue *queue);
2368void cl_2queue_disown(const struct lu_env *env,
2369                      struct cl_io *io, struct cl_2queue *queue);
2370void cl_2queue_discard(const struct lu_env *env,
2371                       struct cl_io *io, struct cl_2queue *queue);
2372void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
2373void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
2374
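/*
 * A minimal usage sketch, for illustration: build a list of pages, walk it,
 * and finalize it so that the references taken by cl_page_list_add() are
 * released. Where the cl_page pointers come from (e.g. cl_page_find()) is up
 * to the caller:
 *
 *        struct cl_page_list plist;
 *        struct cl_page *page;
 *
 *        cl_page_list_init(&plist);
 *        cl_page_list_add(&plist, page);
 *        cl_page_list_for_each(page, &plist)
 *                ...inspect or submit the page...
 *        cl_page_list_fini(env, &plist);
 */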
2375/** @} cl_page_list */
2376
2377void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
2378                     struct cl_req_attr *attr);
2379
2380/** \defgroup cl_sync_io cl_sync_io
2381 * @{
2382 */
2383
2384/**
2385 * Anchor for synchronous transfer. This is allocated on a stack by the thread
2386 * doing a synchronous transfer, and a pointer to this structure is set up in
2387 * every page submitted for transfer. The transfer completion routine updates
2388 * the anchor and wakes up the waiting thread when the transfer is complete.
2389 */
2390struct cl_sync_io {
2391        /** number of pages yet to be transferred. */
2392        atomic_t                csi_sync_nr;
2393        /** error code. */
2394        int                     csi_sync_rc;
2395        /** barrier for destroying this structure */
2396        atomic_t                csi_barrier;
2397        /** completion to be signaled when transfer is complete. */
2398        wait_queue_head_t               csi_waitq;
2399        /** callback to invoke when this IO is finished */
2400        void                    (*csi_end_io)(const struct lu_env *,
2401                                              struct cl_sync_io *);
2402};
2403
2404void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
2405                     void (*end)(const struct lu_env *, struct cl_sync_io *));
2406int  cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
2407                     long timeout);
2408void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
2409                     int ioret);
2410void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
2411
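/*
 * Synchronous transfer sketch, for illustration (cl_io_submit_sync() wraps
 * essentially this pattern): initialize the anchor for the number of pages
 * about to be submitted, submit them, then wait:
 *
 *        struct cl_sync_io anchor;
 *
 *        cl_sync_io_init(&anchor, nr_pages, cl_sync_io_end);
 *        ...submit nr_pages pages, each carrying a pointer to the anchor...
 *        result = cl_sync_io_wait(env, &anchor, timeout);
 *
 * The transfer completion path calls cl_sync_io_note() once per page; the
 * final call wakes up the waiting thread.
 */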
2412/** @} cl_sync_io */
2413
2414/** \defgroup cl_env cl_env
2415 *
2416 * lu_env handling for a client.
2417 *
2418 * lu_env is an environment within which lustre code executes. Its major part
2419 * is lu_context---a fast memory allocation mechanism that is used to conserve
2420 * precious kernel stack space. Originally lu_env was designed for a server,
2421 * where
2422 *
2423 *     - there is a (mostly) fixed number of threads, and
2424 *
2425 *     - call chains have no non-lustre portions inserted between lustre code.
2426 *
2427 * On a client both of these assumptions fail, because every user thread can
2428 * potentially execute lustre code as part of a system call, and lustre calls
2429 * into VFS or MM that call back into lustre.
2430 *
2431 * To deal with that, cl_env wrapper functions implement the following
2432 * optimizations:
2433 *
2434 *     - allocation and destruction of environments is amortized by caching no
2435 *     longer used environments instead of destroying them;
2436 *
2437 * \see lu_env, lu_context, lu_context_key
2438 * @{
2439 */
2440
2441struct lu_env *cl_env_get(u16 *refcheck);
2442struct lu_env *cl_env_alloc(u16 *refcheck, __u32 tags);
2443void cl_env_put(struct lu_env *env, u16 *refcheck);
2444unsigned int cl_env_cache_purge(unsigned int nr);
2445struct lu_env *cl_env_percpu_get(void);
2446void cl_env_percpu_put(struct lu_env *env);
2447
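/*
 * Typical usage, for illustration: obtain an environment at the entry point
 * into the client stack and return it to the cache when done:
 *
 *        u16 refcheck;
 *        struct lu_env *env;
 *
 *        env = cl_env_get(&refcheck);
 *        if (IS_ERR(env))
 *                return PTR_ERR(env);
 *        ...call into cl-code with env...
 *        cl_env_put(env, &refcheck);
 */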
2448/** @} cl_env */
2449
2450/*
2451 * Misc
2452 */
2453void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
2454
2455struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
2456                                struct lu_device_type *ldt,
2457                                struct lu_device *next);
2458/** @} clio */
2459
2460int cl_global_init(void);
2461void cl_global_fini(void);
2462
2463#endif /* _LUSTRE_CL_OBJECT_H */
2464