/* linux/drivers/staging/lustre/lustre/include/lustre_dlm.h */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * GPL HEADER START
   4 *
   5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 only,
   9 * as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License version 2 for more details (a copy is included
  15 * in the LICENSE file that accompanied this code).
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * version 2 along with this program; If not, see
  19 * http://www.gnu.org/licenses/gpl-2.0.html
  20 *
  21 * GPL HEADER END
  22 */
  23/*
  24 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  25 * Use is subject to license terms.
  26 *
  27 * Copyright (c) 2010, 2015, Intel Corporation.
  28 */
  29/*
  30 * This file is part of Lustre, http://www.lustre.org/
  31 * Lustre is a trademark of Sun Microsystems, Inc.
  32 */
  33
  34/** \defgroup LDLM Lustre Distributed Lock Manager
  35 *
  36 * Lustre DLM is based on VAX DLM.
  37 * Its two main roles are:
  38 *   - To provide locking assuring consistency of data on all Lustre nodes.
  39 *   - To allow clients to cache state protected by a lock by holding the
  40 *     lock until a conflicting lock is requested or it is expired by the LRU.
  41 *
  42 * @{
  43 */
  44
  45#ifndef _LUSTRE_DLM_H__
  46#define _LUSTRE_DLM_H__
  47
  48#include <lustre_lib.h>
  49#include <lustre_net.h>
  50#include <lustre_import.h>
  51#include <lustre_handles.h>
  52#include <interval_tree.h>      /* for interval_node{}, ldlm_extent */
  53#include <lu_ref.h>
  54
  55#include "lustre_dlm_flags.h"
  56
  57struct obd_ops;
  58struct obd_device;
  59
  60#define OBD_LDLM_DEVICENAME  "ldlm"
  61
  62#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
  63#define LDLM_DEFAULT_MAX_ALIVE (65 * 60 * HZ) /* 65 min */
  64#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
  65
  66/**
  67 * LDLM non-error return states
  68 */
/**
 * LDLM non-error return states.
 *
 * Values 0 and 1 are success states; the 3xx values are per-lock
 * operation statuses and the 4xx values are namespace-level statuses.
 */
enum ldlm_error {
	ELDLM_OK = 0,			/* operation succeeded */
	ELDLM_LOCK_MATCHED = 1,		/* request satisfied by an existing lock */

	ELDLM_LOCK_CHANGED = 300,	/* granted lock differs from the request */
	ELDLM_LOCK_ABORTED = 301,
	ELDLM_LOCK_REPLACED = 302,
	ELDLM_NO_LOCK_DATA = 303,
	ELDLM_LOCK_WOULDBLOCK = 304,

	ELDLM_NAMESPACE_EXISTS = 400,
	ELDLM_BAD_NAMESPACE    = 401
};
  82
  83/**
  84 * LDLM namespace type.
  85 * The "client" type is actually an indication that this is a narrow local view
  86 * into complete namespace on the server. Such namespaces cannot make any
  87 * decisions about lack of conflicts or do any autonomous lock granting without
  88 * first speaking to a server.
  89 */
/* Distinct bit flags so code can test for either/both sides with a mask. */
enum ldlm_side {
	LDLM_NAMESPACE_SERVER = 1 << 0,	/* authoritative, sees all locks */
	LDLM_NAMESPACE_CLIENT = 1 << 1	/* narrow local view; must ask server */
};
  94
  95/**
  96 * The blocking callback is overloaded to perform two functions.  These flags
  97 * indicate which operation should be performed.
  98 */
  99#define LDLM_CB_BLOCKING    1
 100#define LDLM_CB_CANCELING   2
 101
 102/**
 103 * \name Lock Compatibility Matrix.
 104 *
 105 * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
 106 * Lock types are described in their respective implementation files:
 107 * ldlm_{extent,flock,inodebits,plain}.c.
 108 *
 109 * There are six lock modes along with a compatibility matrix to indicate if
 110 * two locks are compatible.
 111 *
 112 * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
 113 *   on the parent.
 114 * - PW: Protective Write (normal write) mode. When a client requests a write
 115 *   lock from an OST, a lock with PW mode will be issued.
 116 * - PR: Protective Read (normal read) mode. When a client requests a read from
 117 *   an OST, a lock with PR mode will be issued. Also, if the client opens a
 118 *   file for execution, it is granted a lock with PR mode.
 119 * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
 120 *   requests a write lock during a file open operation.
 121 * - CR Concurrent Read mode. When a client performs a path lookup, MDS grants
 122 *   an inodebit lock with the CR mode on the intermediate path component.
 123 * - NL Null mode.
 124 *
 125 * <PRE>
 126 *       NL  CR  CW  PR  PW  EX
 127 *  NL    1   1   1   1   1   1
 128 *  CR    1   1   1   1   1   0
 129 *  CW    1   1   1   0   0   0
 130 *  PR    1   1   0   1   0   0
 131 *  PW    1   1   0   0   0   0
 132 *  EX    1   0   0   0   0   0
 133 * </PRE>
 134 */
/** @{ */
/*
 * Each LCK_COMPAT_xx mask is the set of existing modes a new xx lock is
 * compatible with.  The masks are built incrementally and encode exactly
 * the matrix above (e.g. EX is compatible only with NL; PW additionally
 * with CR; and NL with everything).
 */
#define LCK_COMPAT_EX  LCK_NL
#define LCK_COMPAT_PW  (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR  (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW  (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)
#define LCK_COMPAT_COS (LCK_COS)
/** @} Lock Compatibility Matrix */
 145
 146extern enum ldlm_mode lck_compat_array[];
 147
/**
 * Assert that \a mode is a valid lock mode, i.e. strictly between
 * LCK_MINMODE and LCK_MAXMODE.
 */
static inline void lockmode_verify(enum ldlm_mode mode)
{
	LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}
 152
 153static inline int lockmode_compat(enum ldlm_mode exist_mode,
 154                                  enum ldlm_mode new_mode)
 155{
 156        return (lck_compat_array[exist_mode] & new_mode);
 157}
 158
 159/*
 160 *
 161 * cluster name spaces
 162 *
 163 */
 164
 165#define DLM_OST_NAMESPACE 1
 166#define DLM_MDS_NAMESPACE 2
 167
 168/* XXX
 169   - do we just separate this by security domains and use a prefix for
 170     multiple namespaces in the same domain?
 171   -
 172*/
 173
 174/**
 175 * Locking rules for LDLM:
 176 *
 177 * lr_lock
 178 *
 179 * lr_lock
 180 *     waiting_locks_spinlock
 181 *
 182 * lr_lock
 183 *     led_lock
 184 *
 185 * lr_lock
 186 *     ns_lock
 187 *
 188 * lr_lvb_mutex
 189 *     lr_lock
 190 *
 191 */
 192
 193struct ldlm_pool;
 194struct ldlm_lock;
 195struct ldlm_resource;
 196struct ldlm_namespace;
 197
 198/**
 199 * Operations on LDLM pools.
 200 * LDLM pool is a pool of locks in the namespace without any implicitly
 201 * specified limits.
 202 * Locks in the pool are organized in LRU.
 203 * Local memory pressure or server instructions (e.g. mempressure on server)
 204 * can trigger freeing of locks from the pool
 205 */
/* Virtual operations differ between client- and server-side pools;
 * \see struct ldlm_pool for the pool itself. */
struct ldlm_pool_ops {
	/** Recalculate pool \a pl usage */
	int (*po_recalc)(struct ldlm_pool *pl);
	/** Cancel at least \a nr locks from pool \a pl; \a gfp_mask is the
	 *  allocation context of the (memory-pressure) caller. */
	int (*po_shrink)(struct ldlm_pool *pl, int nr,
			 gfp_t gfp_mask);
};
 213
 214/** One second for pools thread check interval. Each pool has own period. */
 215#define LDLM_POOLS_THREAD_PERIOD (1)
 216
 217/** ~6% margin for modest pools. See ldlm_pool.c for details. */
 218#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
 219
 220/** Default recalc period for server side pools in sec. */
 221#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)
 222
 223/** Default recalc period for client side pools in sec. */
 224#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
 225
/**
 * LDLM pool structure to track granted locks.
 * For purposes of determining when to release locks on e.g. memory pressure.
 * This feature is commonly referred to as lru_resize.
 */
struct ldlm_pool {
	/** Pool debugfs directory. */
	struct dentry		*pl_debugfs_entry;
	/** Pool name, must be long enough to hold compound proc entry name. */
	char			pl_name[100];
	/** Lock for protecting SLV/CLV updates. */
	spinlock_t		pl_lock;
	/** Number of allowed locks in pool, both client and server side. */
	atomic_t		pl_limit;
	/** Number of granted locks in the pool. */
	atomic_t		pl_granted;
	/** Grant rate per T ("T" is presumably the recalc period;
	 *  see ldlm_pool.c — TODO confirm). */
	atomic_t		pl_grant_rate;
	/** Cancel rate per T. */
	atomic_t		pl_cancel_rate;
	/** Server lock volume (SLV). Protected by pl_lock. */
	__u64			pl_server_lock_volume;
	/** Current biggest client lock volume. Protected by pl_lock. */
	__u64			pl_client_lock_volume;
	/** Lock volume factor. SLV on client is calculated as following:
	 *  server_slv * lock_volume_factor.
	 */
	atomic_t		pl_lock_volume_factor;
	/** Time when last SLV from server was obtained. */
	time64_t		pl_recalc_time;
	/** Recalculation period for pool. */
	time64_t		pl_recalc_period;
	/** Recalculation and shrink operations. */
	const struct ldlm_pool_ops	*pl_ops;
	/** Number of planned locks for next period. */
	int			pl_grant_plan;
	/** Pool statistics. */
	struct lprocfs_stats	*pl_stats;

	/* sysfs object */
	struct kobject		 pl_kobj;
	struct completion	 pl_kobj_unregister;
};
 269
 270typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
 271
/**
 * LVB operations.
 * LVB is Lock Value Block. This is a special opaque (to LDLM) value that could
 * be associated with an LDLM lock and transferred from client to server and
 * back.
 *
 * Currently LVBs are used by:
 *  - OSC-OST code to maintain current object size/times
 *  - layout lock code to return the layout when the layout lock is granted
 */
struct ldlm_valblock_ops {
	/** Initialize LVB storage for resource \a res. */
	int (*lvbo_init)(struct ldlm_resource *res);
	/** Update resource LVB from request \a r; \a increase presumably
	 *  selects grow-only updates — TODO confirm with implementations. */
	int (*lvbo_update)(struct ldlm_resource *res,
			   struct ptlrpc_request *r,
			   int increase);
	/** Release LVB storage for resource \a res. */
	int (*lvbo_free)(struct ldlm_resource *res);
	/* Return size of lvb data appropriate RPC size can be reserved */
	int (*lvbo_size)(struct ldlm_lock *lock);
	/* Called to fill in lvb data to RPC buffer @buf */
	int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
};
 293
/**
 * LDLM pools related, type of lock pool in the namespace.
 * Greedy means release cached locks aggressively
 */
enum ldlm_appetite {
	LDLM_NAMESPACE_GREEDY = 1 << 0,	/* release cached locks aggressively */
	LDLM_NAMESPACE_MODEST = 1 << 1	/* keep locks cached longer */
};
 302
/** Per-hash-bucket data of a namespace's resource hash table. */
struct ldlm_ns_bucket {
	/** back pointer to namespace */
	struct ldlm_namespace      *nsb_namespace;
	/**
	 * Estimated lock callback time.  Used by adaptive timeout code to
	 * avoid spurious client evictions due to unresponsiveness when in
	 * fact the network or overall system load is at fault
	 */
	struct adaptive_timeout     nsb_at_estimate;
};
 313
/* Indices into the namespace lprocfs stats array (\see ns_stats below). */
enum {
	/** LDLM namespace lock stats */
	LDLM_NSS_LOCKS    = 0,
	LDLM_NSS_LAST	/* number of counters, not a real counter */
};
 319
/* Which Lustre service a namespace belongs to (client/server per target). */
enum ldlm_ns_type {
	/** invalid type */
	LDLM_NS_TYPE_UNKNOWN    = 0,
	/** mdc namespace */
	LDLM_NS_TYPE_MDC,
	/** mds namespace */
	LDLM_NS_TYPE_MDT,
	/** osc namespace */
	LDLM_NS_TYPE_OSC,
	/** ost namespace */
	LDLM_NS_TYPE_OST,
	/** mgc namespace */
	LDLM_NS_TYPE_MGC,
	/** mgs namespace */
	LDLM_NS_TYPE_MGT,
};
 336
/**
 * LDLM Namespace.
 *
 * Namespace serves to contain locks related to a particular service.
 * There are two kinds of namespaces:
 * - Server namespace has knowledge of all locks and is therefore authoritative
 *   to make decisions like what locks could be granted and what conflicts
 *   exist during new lock enqueue.
 * - Client namespace only has limited knowledge about locks in the namespace,
 *   only seeing locks held by the client.
 *
 * Every Lustre service has one server namespace present on the server serving
 * that service. Every client connected to the service has a client namespace
 * for it.
 * Every lock obtained by client in that namespace is actually represented by
 * two in-memory locks. One on the server and one on the client. The locks are
 * linked by a special cookie by which one node can tell to the other which lock
 * it actually means during communications. Such locks are called remote locks.
 * The locks held by server only without any reference to a client are called
 * local locks.
 */
struct ldlm_namespace {
	/** Backward link to OBD, required for LDLM pool to store new SLV. */
	struct obd_device	*ns_obd;

	/** Flag indicating if namespace is on client instead of server */
	enum ldlm_side		ns_client;

	/** Resource hash table for namespace. */
	struct cfs_hash		*ns_rs_hash;

	/** Serializes namespace state, e.g. the LRU list below
	 *  (\see l_lru in struct ldlm_lock, which is protected by ns_lock). */
	spinlock_t		ns_lock;

	/** big refcount (by bucket) */
	atomic_t		ns_bref;

	/**
	 * Namespace connect flags supported by server (may be changed via
	 * sysfs, LRU resize may be disabled/enabled).
	 */
	__u64			ns_connect_flags;

	/** Client side original connect flags supported by server. */
	__u64			ns_orig_connect_flags;

	/* namespace debugfs dir entry */
	struct dentry		*ns_debugfs_entry;

	/**
	 * Position in global namespace list linking all namespaces on
	 * the node.
	 */
	struct list_head		ns_list_chain;

	/**
	 * List of unused locks for this namespace. This list is also called
	 * LRU lock list.
	 * Unused locks are locks with zero reader/writer reference counts.
	 * This list is only used on clients for lock caching purposes.
	 * When we want to release some locks voluntarily or if server wants
	 * us to release some locks due to e.g. memory pressure, we take locks
	 * to release from the head of this list.
	 * Locks are linked via l_lru field in \see struct ldlm_lock.
	 */
	struct list_head		ns_unused_list;
	/** Number of locks in the LRU list above */
	int			ns_nr_unused;

	/**
	 * Maximum number of locks permitted in the LRU. If 0, means locks
	 * are managed by pools and there is no preset limit, rather it is all
	 * controlled by available memory on this client and on server.
	 */
	unsigned int		ns_max_unused;
	/** Maximum allowed age (last used time) for locks in the LRU */
	unsigned int		ns_max_age;

	/**
	 * Used to rate-limit ldlm_namespace_dump calls.
	 * \see ldlm_namespace_dump. Increased by 10 seconds every time
	 * it is called.
	 */
	unsigned long		ns_next_dump;

	/**
	 * LVB operations for this namespace.
	 * \see struct ldlm_valblock_ops
	 */
	struct ldlm_valblock_ops *ns_lvbo;

	/**
	 * Used by filter code to store pointer to OBD of the service.
	 * Should be dropped in favor of \a ns_obd
	 */
	void			*ns_lvbp;

	/**
	 * Wait queue used by __ldlm_namespace_free. Gets woken up every time
	 * a resource is removed.
	 */
	wait_queue_head_t		ns_waitq;
	/** LDLM pool structure for this namespace */
	struct ldlm_pool	ns_pool;
	/** Definition of how eagerly unused locks will be released from LRU */
	enum ldlm_appetite	ns_appetite;

	/** Limit of parallel AST RPC count. */
	unsigned		ns_max_parallel_ast;

	/**
	 * Callback to check if a lock is good to be canceled by ELC or
	 * during recovery.
	 */
	ldlm_cancel_cbt		ns_cancel;

	/** LDLM lock stats (\see LDLM_NSS_* indices above) */
	struct lprocfs_stats	*ns_stats;

	/**
	 * Flag to indicate namespace is being freed. Used to determine if
	 * recalculation of LDLM pool statistics should be skipped.
	 */
	unsigned		ns_stopping:1;

	struct kobject		ns_kobj; /* sysfs object */
	struct completion	ns_kobj_unregister;
};
 465
 466/**
 467 * Returns 1 if namespace \a ns supports early lock cancel (ELC).
 468 */
 469static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
 470{
 471        return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
 472}
 473
 474/**
 475 * Returns 1 if this namespace supports lru_resize.
 476 */
 477static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
 478{
 479        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
 480}
 481
/** Install \a arg as the namespace's cancel-eligibility callback
 *  (\see ns_cancel in struct ldlm_namespace). */
static inline void ns_register_cancel(struct ldlm_namespace *ns,
				      ldlm_cancel_cbt arg)
{
	ns->ns_cancel = arg;
}
 487
struct ldlm_lock;

/** Type for blocking callback function of a lock.
 *  \a flag distinguishes the two roles: LDLM_CB_BLOCKING when a conflicting
 *  lock arrives, LDLM_CB_CANCELING when the lock is being cancelled. */
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
				      struct ldlm_lock_desc *new, void *data,
				      int flag);
/** Type for completion callback function of a lock (called on grant). */
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
					void *data);
/** Type for glimpse callback function of a lock (LVB retrieval). */
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
 499
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
	struct ldlm_lock	*gl_lock; /* lock to glimpse */
	struct list_head		 gl_list; /* linkage to other gl work structs */
	__u32			 gl_flags; /* see LDLM_GL_WORK_* below */
	union ldlm_gl_desc	*gl_desc; /* glimpse descriptor to be packed in
					   * glimpse callback request
					   */
};

/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
#define LDLM_GL_WORK_NOFREE 0x1
 512
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
	struct interval_node	li_node;  /* node for tree management */
	struct list_head	li_group; /* the locks which have the same
					   * policy - group of the policy
					   */
};

/* Map an interval_node embedded in struct ldlm_interval back to it. */
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
 522
/**
 * Interval tree for extent locks.
 * The interval tree must be accessed under the resource lock.
 * Interval trees are used for granted extent locks to speed up conflicts
 * lookup. See ldlm/interval_tree.c for more details.
 */
struct ldlm_interval_tree {
	/** Tree size. */
	int			lit_size;
	enum ldlm_mode		lit_mode;  /* lock mode */
	struct interval_node	*lit_root; /* actual ldlm_interval */
};

/** Whether to track references to exports by LDLM locks. */
#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
 538
/** Cancel flags. */
enum ldlm_cancel_flags {
	LCF_ASYNC      = 0x1, /* Cancel locks asynchronously. */
	LCF_LOCAL      = 0x2, /* Cancel locks locally, not notifying server */
	LCF_BL_AST     = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
			       * in the same RPC
			       */
};
 547
/* Policy data for flock (file range) locks. */
struct ldlm_flock {
	__u64 start;			/* start of the byte range */
	__u64 end;			/* end of the byte range */
	__u64 owner;			/* lock owner identifier */
	__u64 blocking_owner;		/* owner this lock is blocked on,
					 * presumably for deadlock detection
					 * — TODO confirm */
	struct obd_export *blocking_export;
	__u32 pid;			/* PID of the locking process */
};
 556
/* Type-specific lock policy; which member is valid depends on the
 * resource's lock type (LDLM_EXTENT / LDLM_FLOCK / LDLM_IBITS). */
union ldlm_policy_data {
	struct ldlm_extent l_extent;
	struct ldlm_flock l_flock;
	struct ldlm_inodebits l_inodebits;
};
 562
 563void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
 564                                  const union ldlm_wire_policy_data *wpolicy,
 565                                  union ldlm_policy_data *lpolicy);
 566
/* Discriminator for the format of a lock's LVB (\see l_lvb_data). */
enum lvb_type {
	LVB_T_NONE	= 0,	/* no LVB attached */
	LVB_T_OST	= 1,	/* OST object size/times */
	LVB_T_LQUOTA	= 2,	/* quota information */
	LVB_T_LAYOUT	= 3,	/* file layout */
};
 573
 574/**
 575 * LDLM_GID_ANY is used to match any group id in ldlm_lock_match().
 576 */
 577#define LDLM_GID_ANY    ((__u64)-1)
 578
/**
 * LDLM lock structure
 *
 * Represents a single LDLM lock and its state in memory. Each lock is
 * associated with a single ldlm_resource, the object which is being
 * locked. There may be multiple ldlm_locks on a single resource,
 * depending on the lock type and whether the locks are conflicting or
 * not.
 */
struct ldlm_lock {
	/**
	 * Local lock handle.
	 * When remote side wants to tell us about a lock, they address
	 * it by this opaque handle.  The handle does not hold a
	 * reference on the ldlm_lock, so it can be safely passed to
	 * other threads or nodes. When the lock needs to be accessed
	 * from the handle, it is looked up again in the lock table, and
	 * may no longer exist.
	 *
	 * Must be first in the structure.
	 */
	struct portals_handle	l_handle;
	/**
	 * Lock reference count.
	 * This is how many users have pointers to actual structure, so that
	 * we do not accidentally free lock structure that is in use.
	 */
	atomic_t		l_refc;
	/**
	 * Internal spinlock protects l_resource.  We should hold this lock
	 * first before taking res_lock.
	 */
	spinlock_t		l_lock;
	/**
	 * Pointer to actual resource this lock is in.
	 * ldlm_lock_change_resource() can change this.
	 */
	struct ldlm_resource	*l_resource;
	/**
	 * List item for client side LRU list.
	 * Protected by ns_lock in struct ldlm_namespace.
	 */
	struct list_head		l_lru;
	/**
	 * Linkage to resource's lock queues according to current lock state.
	 * (could be granted, waiting or converting)
	 * Protected by lr_lock in struct ldlm_resource.
	 */
	struct list_head		l_res_link;
	/**
	 * Tree node for ldlm_extent.
	 */
	struct ldlm_interval	*l_tree_node;
	/**
	 * Per export hash of locks.
	 * Protected by per-bucket exp->exp_lock_hash locks.
	 */
	struct hlist_node	l_exp_hash;
	/**
	 * Per export hash of flock locks.
	 * Protected by per-bucket exp->exp_flock_hash locks.
	 */
	struct hlist_node	l_exp_flock_hash;
	/**
	 * Requested mode.
	 * Protected by lr_lock.
	 */
	enum ldlm_mode		l_req_mode;
	/**
	 * Granted mode, also protected by lr_lock.
	 */
	enum ldlm_mode		l_granted_mode;
	/** Lock completion handler pointer. Called when lock is granted. */
	ldlm_completion_callback l_completion_ast;
	/**
	 * Lock blocking AST handler pointer.
	 * It plays two roles:
	 * - as a notification of an attempt to queue a conflicting lock (once)
	 * - as a notification when the lock is being cancelled.
	 *
	 * As such it's typically called twice: once for the initial conflict
	 * and then once more when the last user went away and the lock is
	 * cancelled (could happen recursively).
	 */
	ldlm_blocking_callback	l_blocking_ast;
	/**
	 * Lock glimpse handler.
	 * Glimpse handler is used to obtain LVB updates from a client by
	 * server
	 */
	ldlm_glimpse_callback	l_glimpse_ast;

	/**
	 * Lock export.
	 * This is a pointer to actual client export for locks that were granted
	 * to clients. Used server-side.
	 */
	struct obd_export	*l_export;
	/**
	 * Lock connection export.
	 * Pointer to server export on a client.
	 */
	struct obd_export	*l_conn_export;

	/**
	 * Remote lock handle.
	 * If the lock is remote, this is the handle of the other side lock
	 * (l_handle)
	 */
	struct lustre_handle	l_remote_handle;

	/**
	 * Representation of private data specific for a lock type.
	 * Examples are: extent range for extent lock or bitmask for ibits locks
	 */
	union ldlm_policy_data	l_policy_data;

	/**
	 * Lock state flags. Protected by lr_lock.
	 * \see lustre_dlm_flags.h where the bits are defined.
	 */
	__u64			l_flags;

	/**
	 * Lock r/w usage counters.
	 * Protected by lr_lock.
	 */
	__u32			l_readers;
	__u32			l_writers;
	/**
	 * If the lock is granted, a process sleeps on this waitq to learn when
	 * it's no longer in use.  If the lock is not granted, a process sleeps
	 * on this waitq to learn when it becomes granted.
	 */
	wait_queue_head_t		l_waitq;

	/**
	 * Seconds. It will be updated if there is any activity related to
	 * the lock, e.g. enqueue the lock or send blocking AST.
	 */
	time64_t		l_last_activity;

	/**
	 * Time last used by e.g. being matched by lock match.
	 * Jiffies. Should be converted to time if needed.
	 */
	unsigned long		l_last_used;

	/** Originally requested extent for the extent lock. */
	struct ldlm_extent	l_req_extent;

	/*
	 * Client-side-only members.
	 */

	/** Format of the LVB carried by this lock (\see enum lvb_type). */
	enum lvb_type	      l_lvb_type;

	/**
	 * Temporary storage for a LVB received during an enqueue operation.
	 * l_lvb_len is the byte length of l_lvb_data.
	 */
	__u32			l_lvb_len;
	void			*l_lvb_data;

	/** Private storage for lock user. Opaque to LDLM. */
	void			*l_ast_data;

	/*
	 * Server-side-only members.
	 */

	/**
	 * Connection cookie for the client originating the operation.
	 * Used by Commit on Share (COS) code. Currently only used for
	 * inodebits locks on MDS.
	 */
	__u64			l_client_cookie;

	/**
	 * List item for locks waiting for cancellation from clients.
	 * The lists this could be linked into are:
	 * waiting_locks_list (protected by waiting_locks_spinlock),
	 * then if the lock timed out, it is moved to
	 * expired_lock_thread.elt_expired_locks for further processing.
	 * Protected by elt_lock.
	 */
	struct list_head		l_pending_chain;

	/**
	 * Set when lock is sent a blocking AST. Time in seconds when timeout
	 * is reached and client holding this lock could be evicted.
	 * This timeout could be further extended by e.g. certain IO activity
	 * under this lock.
	 * \see ost_rw_prolong_locks
	 */
	unsigned long		l_callback_timeout;

	/** Local PID of process which created this lock. */
	__u32			l_pid;

	/**
	 * Number of times blocking AST was sent for this lock.
	 * This is for debugging. Valid values are 0 and 1, if there is an
	 * attempt to send blocking AST more than once, an assertion would be
	 * hit. \see ldlm_work_bl_ast_lock
	 */
	int			l_bl_ast_run;
	/** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
	struct list_head		l_bl_ast;
	/** List item ldlm_add_ast_work_item() for case of completion ASTs. */
	struct list_head		l_cp_ast;
	/** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
	struct list_head		l_rk_ast;

	/**
	 * Pointer to a conflicting lock that caused blocking AST to be sent
	 * for this lock
	 */
	struct ldlm_lock	*l_blocking_lock;

	/**
	 * Protected by lr_lock, linkages to "skip lists".
	 * For more explanations of skip lists see ldlm/ldlm_inodebits.c
	 */
	struct list_head		l_sl_mode;
	struct list_head		l_sl_policy;

	/** Reference tracking structure to debug leaked locks. */
	struct lu_ref		l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
	/* Debugging stuff for bug 20498, for tracking export references. */
	/** number of export references taken */
	int			l_exp_refs_nr;
	/** link all locks referencing one export */
	struct list_head		l_exp_refs_link;
	/** referenced export object */
	struct obd_export	*l_exp_refs_target;
#endif
};
 817
 818/**
 819 * LDLM resource description.
 820 * Basically, resource is a representation for a single object.
 821 * Object has a name which is currently 4 64-bit integers. LDLM user is
 822 * responsible for creation of a mapping between objects it wants to be
 823 * protected and resource names.
 824 *
 825 * A resource can only hold locks of a single lock type, though there may be
 826 * multiple ldlm_locks on a single resource, depending on the lock type and
 827 * whether the locks are conflicting or not.
 828 */
struct ldlm_resource {
	/** Namespace hash bucket this resource was hashed into; also the
	 * road back to the owning namespace, see ldlm_res_to_ns().
	 */
	struct ldlm_ns_bucket	*lr_ns_bucket;

	/**
	 * List item for list in namespace hash.
	 * protected by ns_lock
	 */
	struct hlist_node	lr_hash;

	/** Spinlock to protect locks under this resource. */
	spinlock_t		lr_lock;

	/**
	 * protected by lr_lock
	 * @{
	 */
	/** List of locks in granted state */
	struct list_head	lr_granted;
	/**
	 * List of locks that could not be granted due to conflicts and
	 * that are waiting for conflicts to go away
	 */
	struct list_head	lr_waiting;
	/** @} */

	/** Type of locks this resource can hold. Only one type per resource. */
	enum ldlm_type		lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */

	/** Resource name */
	struct ldlm_res_id	lr_name;
	/** Reference count for this resource */
	atomic_t		lr_refcount;

	/**
	 * Interval trees (only for extent locks) for all modes of this
	 * resource, one tree per lock mode (hence LCK_MODE_NUM entries)
	 */
	struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];

	/**
	 * Server-side-only lock value block elements.
	 * To serialize lvbo_init.
	 */
	struct mutex		lr_lvb_mutex;
	int			lr_lvb_len;

	/** When the resource was considered as contended. */
	unsigned long		lr_contention_time;
	/** List of references to this resource. For debugging. */
	struct lu_ref		lr_reference;

	/* NOTE(review): presumably the inode whose lock value block this
	 * resource caches on the client — confirm against lvbo users in
	 * ldlm/ldlm_resource.c before relying on this.
	 */
	struct inode		*lr_lvb_inode;
};
 881
 882static inline bool ldlm_has_layout(struct ldlm_lock *lock)
 883{
 884        return lock->l_resource->lr_type == LDLM_IBITS &&
 885                lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT;
 886}
 887
/** Return the printable name of namespace \a ns (stored in its resource hash). */
static inline char *
ldlm_ns_name(struct ldlm_namespace *ns)
{
	return ns->ns_rs_hash->hs_name;
}
 893
/** Return the namespace owning resource \a res, via its hash bucket. */
static inline struct ldlm_namespace *
ldlm_res_to_ns(struct ldlm_resource *res)
{
	return res->lr_ns_bucket->nsb_namespace;
}
 899
/** Return the namespace owning the resource that \a lock lives on. */
static inline struct ldlm_namespace *
ldlm_lock_to_ns(struct ldlm_lock *lock)
{
	return ldlm_res_to_ns(lock->l_resource);
}
 905
/** Return the printable name of the namespace owning \a lock. */
static inline char *
ldlm_lock_to_ns_name(struct ldlm_lock *lock)
{
	return ldlm_ns_name(ldlm_lock_to_ns(lock));
}
 911
/** Return the adaptive-timeout estimate of the namespace bucket \a lock's
 * resource is hashed into.
 */
static inline struct adaptive_timeout *
ldlm_lock_to_ns_at(struct ldlm_lock *lock)
{
	return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
}
 917
 918static inline int ldlm_lvbo_init(struct ldlm_resource *res)
 919{
 920        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
 921
 922        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init)
 923                return ns->ns_lvbo->lvbo_init(res);
 924
 925        return 0;
 926}
 927
 928static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
 929{
 930        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 931
 932        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size)
 933                return ns->ns_lvbo->lvbo_size(lock);
 934
 935        return 0;
 936}
 937
 938static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
 939{
 940        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 941
 942        if (ns->ns_lvbo)
 943                return ns->ns_lvbo->lvbo_fill(lock, buf, len);
 944
 945        return 0;
 946}
 947
/**
 * A unit of AST (Asynchronous System Trap) delivery work.
 * NOTE(review): member meanings inferred from names and types only —
 * confirm against the AST work-list producers/consumers in ldlm_lock.c.
 */
struct ldlm_ast_work {
	/** Lock the AST is being sent for. */
	struct ldlm_lock	*w_lock;
	/** Non-zero when this item describes a blocking (vs completion) AST. */
	int			w_blocking;
	/** Description of the lock shipped along with the AST. */
	struct ldlm_lock_desc	w_desc;
	/** Linkage into the pending AST work list. */
	struct list_head	w_list;
	int			w_flags;
	/** Opaque payload for the callback, and its length. */
	void			*w_data;
	int			w_datalen;
};
 957
/**
 * Common ldlm_enqueue parameters
 */
struct ldlm_enqueue_info {
	enum ldlm_type	ei_type;  /**< Type of the lock being enqueued. */
	enum ldlm_mode	ei_mode;  /**< Mode of the lock being enqueued. */
	void *ei_cb_bl;  /**< blocking lock callback */
	void *ei_cb_cp;  /**< lock completion callback */
	void *ei_cb_gl;  /**< lock glimpse callback */
	void *ei_cbdata; /**< Data to be passed into callbacks. */
	unsigned int ei_enq_slave:1; /**< whether enqueue slave stripes */
};
 970
 971extern struct obd_ops ldlm_obd_ops;
 972
 973extern char *ldlm_lockname[];
 974const char *ldlm_it2str(enum ldlm_intent_flags it);
 975
/**
 * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
 * For the cases where we do not have actual lock to print along
 * with a debugging message that is ldlm-related.
 * A "### " prefix and a trailing newline are added automatically.
 */
#define LDLM_DEBUG_NOLOCK(format, a...)			\
	CDEBUG(D_DLMTRACE, "### " format "\n", ##a)
 983
/**
 * Support function for lock information printing into debug logs.
 *
 * Emits via _ldlm_lock_debug() only when \a mask is unmaskable (D_CANTMASK)
 * or when both the global debug mask and this subsystem's debug mask allow
 * it; \a cdls carries optional rate-limit state.
 * \see LDLM_DEBUG
 */
#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do {	\
	CFS_CHECK_STACK(msgdata, mask, cdls);				\
									\
	if (((mask) & D_CANTMASK) != 0 ||				\
	    ((libcfs_debug & (mask)) != 0 &&				\
	     (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))		\
		_ldlm_lock_debug(lock, msgdata, fmt, ##a);		\
} while (0)
 996
 997void _ldlm_lock_debug(struct ldlm_lock *lock,
 998                      struct libcfs_debug_msg_data *data,
 999                      const char *fmt, ...)
1000        __printf(3, 4);
1001
/**
 * Rate-limited version of lock printing function.
 * A static cfs_debug_limit_state per call site throttles repeated messages.
 */
#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {			\
	static struct cfs_debug_limit_state _ldlm_cdls;			\
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls);		\
	ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt, ##a);\
} while (0)

/** Rate-limited lock printing at error / warning severity. */
#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
1013
/** Non-rate-limited lock printing function for debugging purposes.
 * Falls back to LDLM_DEBUG_NOLOCK() when \a lock is NULL.
 */
#define LDLM_DEBUG(lock, fmt, a...)   do {				  \
	if (likely(lock)) {						    \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);      \
		ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock,	    \
				"### " fmt, ##a);			    \
	} else {							    \
		LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a);		    \
	}								    \
} while (0)
1024
/**
 * Per-lock-type policy function deciding whether a lock can be granted.
 * NOTE(review): the exact *flags / first_enq contract is not visible in this
 * header — see the policy implementations in ldlm/ before relying on this.
 */
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
				      int first_enq, enum ldlm_error *err,
				      struct list_head *work_list);

/**
 * Return values for lock iterators.
 * Also used during deciding of lock grants and cancellations.
 */
#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

/** Iterator callbacks; return LDLM_ITER_CONTINUE or LDLM_ITER_STOP. */
typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
1038
1039/** \defgroup ldlm_iterator Lock iterators
1040 *
1041 * LDLM provides for a way to iterate through every lock on a resource or
1042 * namespace or every resource in a namespace.
1043 * @{
1044 */
1045int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
1046                          ldlm_iterator_t iter, void *data);
1047/** @} ldlm_iterator */
1048
1049int ldlm_replay_locks(struct obd_import *imp);
1050
1051/* ldlm_flock.c */
1052int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
1053
1054/* ldlm_extent.c */
1055__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
1056
/** Set of AST callbacks attached to a lock at enqueue time. */
struct ldlm_callback_suite {
	ldlm_completion_callback lcs_completion; /**< completion callback */
	ldlm_blocking_callback	 lcs_blocking;	 /**< blocking callback */
	ldlm_glimpse_callback	 lcs_glimpse;	 /**< glimpse callback */
};
1062
1063/* ldlm_lockd.c */
1064int ldlm_get_ref(void);
1065void ldlm_put_ref(void);
1066struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
1067
1068/* ldlm_lock.c */
1069void ldlm_lock2handle(const struct ldlm_lock *lock,
1070                      struct lustre_handle *lockh);
1071struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
1072void ldlm_cancel_callback(struct ldlm_lock *);
1073int ldlm_lock_remove_from_lru(struct ldlm_lock *);
1074int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data);
1075
/**
 * Obtain a lock reference by its handle.
 *
 * Thin wrapper around __ldlm_handle2lock() with no flags; may return NULL
 * (see ldlm_handle2lock_long(), which NULL-checks the same call).
 */
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
	return __ldlm_handle2lock(h, 0);
}
1083
/**
 * Drop the lu_ref debugging reference tagged "handle" that was taken on
 * \a lock for the current task.
 * The argument is parenthesized in the expansion (standard macro hygiene);
 * the original `&lock->l_reference` would mis-bind for non-trivial
 * expression arguments.
 */
#define LDLM_LOCK_REF_DEL(lock) \
	lu_ref_del(&(lock)->l_reference, "handle", current)
1086
1087static inline struct ldlm_lock *
1088ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
1089{
1090        struct ldlm_lock *lock;
1091
1092        lock = __ldlm_handle2lock(h, flags);
1093        if (lock)
1094                LDLM_LOCK_REF_DEL(lock);
1095        return lock;
1096}
1097
1098/**
1099 * Update Lock Value Block Operations (LVBO) on a resource taking into account
1100 * data from request \a r
1101 */
1102static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
1103                                       struct ptlrpc_request *r, int increase)
1104{
1105        if (ldlm_res_to_ns(res)->ns_lvbo &&
1106            ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
1107                return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
1108                                                                 increase);
1109        }
1110        return 0;
1111}
1112
1113int ldlm_error2errno(enum ldlm_error error);
1114
1115#if LUSTRE_TRACKS_LOCK_EXP_REFS
1116void ldlm_dump_export_locks(struct obd_export *exp);
1117#endif
1118
/**
 * Release a temporary lock reference obtained by ldlm_handle2lock() or
 * __ldlm_handle2lock().
 * Drops the lu_ref "handle" debug reference first, then the real refcount.
 */
#define LDLM_LOCK_PUT(lock)		    \
do {					    \
	LDLM_LOCK_REF_DEL(lock);	    \
	/*LDLM_DEBUG((lock), "put");*/	    \
	ldlm_lock_put(lock);		    \
} while (0)
1129
/**
 * Release a lock reference obtained by some other means (see
 * LDLM_LOCK_PUT()); drops only the real refcount, no lu_ref bookkeeping.
 */
#define LDLM_LOCK_RELEASE(lock)		    \
do {					    \
	/*LDLM_DEBUG((lock), "put");*/	    \
	ldlm_lock_put(lock);		    \
} while (0)
1139
/** Take a reference on \a lock; the statement expression evaluates to the
 * lock itself, so it can be used inline in assignments.
 */
#define LDLM_LOCK_GET(lock)		    \
({					    \
	ldlm_lock_get(lock);		    \
	/*LDLM_DEBUG((lock), "get");*/	    \
	lock;				    \
})
1146
/**
 * Unlink and LDLM_LOCK_RELEASE() up to \a count locks from list \a head,
 * linked through \a member. \a count is evaluated once.
 * LASSERT fires if the list ran out before \a count locks were released.
 * NOTE(review): a negative \a count appears to mean "release everything"
 * (the c-- == 0 test then never trips) — confirm against callers.
 */
#define ldlm_lock_list_put(head, member, count)			\
({								\
	struct ldlm_lock *_lock, *_next;			\
	int c = count;						\
	list_for_each_entry_safe(_lock, _next, head, member) {	\
		if (c-- == 0)					\
			break;					\
		list_del_init(&_lock->member);			\
		LDLM_LOCK_RELEASE(_lock);			\
	}							\
	LASSERT(c <= 0);					\
})
1159
1160struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
1161void ldlm_lock_put(struct ldlm_lock *lock);
1162void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
1163void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
1164int  ldlm_lock_addref_try(const struct lustre_handle *lockh,
1165                          enum ldlm_mode mode);
1166void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
1167void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
1168                                 enum ldlm_mode mode);
1169void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
1170void ldlm_lock_allow_match(struct ldlm_lock *lock);
1171void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
1172enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1173                               const struct ldlm_res_id *,
1174                               enum ldlm_type type, union ldlm_policy_data *,
1175                               enum ldlm_mode mode, struct lustre_handle *,
1176                               int unref);
1177enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
1178                                           __u64 *bits);
1179void ldlm_lock_cancel(struct ldlm_lock *lock);
1180void ldlm_lock_dump_handle(int level, const struct lustre_handle *);
1181void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
1182
1183/* resource.c */
1184struct ldlm_namespace *
1185ldlm_namespace_new(struct obd_device *obd, char *name,
1186                   enum ldlm_side client, enum ldlm_appetite apt,
1187                   enum ldlm_ns_type ns_type);
1188int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
1189void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1190                               struct obd_import *imp,
1191                               int force);
1192void ldlm_namespace_free_post(struct ldlm_namespace *ns);
1193void ldlm_namespace_get(struct ldlm_namespace *ns);
1194void ldlm_namespace_put(struct ldlm_namespace *ns);
1195int ldlm_debugfs_setup(void);
1196void ldlm_debugfs_cleanup(void);
1197
1198/* resource.c - internal */
1199struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
1200                                        struct ldlm_resource *parent,
1201                                        const struct ldlm_res_id *,
1202                                        enum ldlm_type type, int create);
1203int ldlm_resource_putref(struct ldlm_resource *res);
1204void ldlm_resource_add_lock(struct ldlm_resource *res,
1205                            struct list_head *head,
1206                            struct ldlm_lock *lock);
1207void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
1208void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
1209void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
1210void ldlm_namespace_dump(int level, struct ldlm_namespace *);
1211void ldlm_resource_dump(int level, struct ldlm_resource *);
1212int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
1213                              const struct ldlm_res_id *);
1214
/** Take a lu_ref debugging reference on \a res, tagged with the calling
 * function and current task.
 */
#define LDLM_RESOURCE_ADDREF(res) do {				     \
	lu_ref_add_atomic(&(res)->lr_reference, __func__, current);  \
} while (0)

/** Drop the lu_ref debugging reference taken by LDLM_RESOURCE_ADDREF(). */
#define LDLM_RESOURCE_DELREF(res) do {				     \
	lu_ref_del(&(res)->lr_reference, __func__, current);	     \
} while (0)
1222
1223/* ldlm_request.c */
1224/** \defgroup ldlm_local_ast Default AST handlers for local locks
1225 * These AST handlers are typically used for server-side local locks and are
1226 * also used by client-side lock handlers to perform minimum level base
1227 * processing.
1228 * @{
1229 */
1230int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
1231int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
1232/** @} ldlm_local_ast */
1233
1234/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
1235 * These are typically used by client and server (*_local versions)
1236 * to obtain and release locks.
1237 * @{
1238 */
1239int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
1240                     struct ldlm_enqueue_info *einfo,
1241                     const struct ldlm_res_id *res_id,
1242                     union ldlm_policy_data const *policy, __u64 *flags,
1243                     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
1244                     struct lustre_handle *lockh, int async);
1245int ldlm_prep_enqueue_req(struct obd_export *exp,
1246                          struct ptlrpc_request *req,
1247                          struct list_head *cancels,
1248                          int count);
1249int ldlm_prep_elc_req(struct obd_export *exp,
1250                      struct ptlrpc_request *req,
1251                      int version, int opc, int canceloff,
1252                      struct list_head *cancels, int count);
1253
1254int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
1255                          enum ldlm_type type, __u8 with_policy,
1256                          enum ldlm_mode mode,
1257                          __u64 *flags, void *lvb, __u32 lvb_len,
1258                          const struct lustre_handle *lockh, int rc);
1259int ldlm_cli_update_pool(struct ptlrpc_request *req);
1260int ldlm_cli_cancel(const struct lustre_handle *lockh,
1261                    enum ldlm_cancel_flags cancel_flags);
1262int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
1263                           enum ldlm_cancel_flags flags, void *opaque);
1264int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1265                                    const struct ldlm_res_id *res_id,
1266                                    union ldlm_policy_data *policy,
1267                                    enum ldlm_mode mode,
1268                                    enum ldlm_cancel_flags flags,
1269                                    void *opaque);
1270int ldlm_cancel_resource_local(struct ldlm_resource *res,
1271                               struct list_head *cancels,
1272                               union ldlm_policy_data *policy,
1273                               enum ldlm_mode mode, __u64 lock_flags,
1274                               enum ldlm_cancel_flags cancel_flags,
1275                               void *opaque);
1276int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
1277                               enum ldlm_cancel_flags flags);
1278int ldlm_cli_cancel_list(struct list_head *head, int count,
1279                         struct ptlrpc_request *req,
1280                         enum ldlm_cancel_flags flags);
1281/** @} ldlm_cli_api */
1282
1283/* mds/handler.c */
1284/* This has to be here because recursive inclusion sucks. */
1285int intent_disposition(struct ldlm_reply *rep, int flag);
1286void intent_set_disposition(struct ldlm_reply *rep, int flag);
1287
/**
 * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
 * than one lock_res is dead-lock safe.
 */
enum lock_res_type {
	/** Ordinary acquisition (default lockdep subclass). */
	LRT_NORMAL,
	/** Nested acquisition of a second resource; see lock_res_nested(). */
	LRT_NEW
};
1296
/** Lock resource \a res (its lr_lock spinlock). */
static inline void lock_res(struct ldlm_resource *res)
{
	spin_lock(&res->lr_lock);
}
1302
/** Lock resource with a way to instruct lockdep code about nestedness-safe.
 * \a mode (a lock_res_type) is passed as the lockdep subclass.
 */
static inline void lock_res_nested(struct ldlm_resource *res,
				   enum lock_res_type mode)
{
	spin_lock_nested(&res->lr_lock, mode);
}
1309
/** Unlock resource \a res (its lr_lock spinlock). */
static inline void unlock_res(struct ldlm_resource *res)
{
	spin_unlock(&res->lr_lock);
}
1315
/** Assert that the caller holds \a res's lr_lock (debug builds only). */
static inline void check_res_locked(struct ldlm_resource *res)
{
	assert_spin_locked(&res->lr_lock);
}
1321
1322struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
1323void unlock_res_and_lock(struct ldlm_lock *lock);
1324
1325/* ldlm_pool.c */
1326/** \defgroup ldlm_pools Various LDLM pool related functions
1327 * There are not used outside of ldlm.
1328 * @{
1329 */
1330int ldlm_pools_init(void);
1331void ldlm_pools_fini(void);
1332
1333int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
1334                   int idx, enum ldlm_side client);
1335void ldlm_pool_fini(struct ldlm_pool *pl);
1336void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
1337void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
1338/** @} */
1339
1340static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
1341                                      const struct ldlm_extent *ex2)
1342{
1343        return ex1->start <= ex2->end && ex2->start <= ex1->end;
1344}
1345
1346/* check if @ex1 contains @ex2 */
1347static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
1348                                      const struct ldlm_extent *ex2)
1349{
1350        return ex1->start <= ex2->start && ex1->end >= ex2->end;
1351}
1352
1353#endif
1354/** @} LDLM */
1355