linux/fs/locks.c
   1/*
   2 *  linux/fs/locks.c
   3 *
   4 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5 *  Doug Evans (dje@spiff.uucp), August 07, 1992
   6 *
   7 *  Deadlock detection added.
   8 *  FIXME: one thing isn't handled yet:
   9 *      - mandatory locks (requires lots of changes elsewhere)
  10 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
  11 *
  12 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
  13 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
  14 *  
  15 *  Converted file_lock_table to a linked list from an array, which eliminates
  16 *  the limits on how many active file locks are open.
  17 *  Chad Page (pageone@netcom.com), November 27, 1994
  18 * 
  19 *  Removed dependency on file descriptors. dup()'ed file descriptors now
  20 *  get the same locks as the original file descriptors, and a close() on
  21 *  any file descriptor removes ALL the locks on the file for the current
  22 *  process. Since locks still depend on the process id, locks are inherited
  23 *  after an exec() but not after a fork(). This agrees with POSIX, and both
  24 *  BSD and SVR4 practice.
  25 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
  26 *
  27 *  Scrapped free list which is redundant now that we allocate locks
  28 *  dynamically with kmalloc()/kfree().
  29 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  30 *
  31 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
  32 *
  33 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
  34 *  fcntl() system call. They have the semantics described above.
  35 *
  36 *  FL_FLOCK locks are created with calls to flock(), through the flock()
  37 *  system call, which is new. Old C libraries implement flock() via fcntl()
  38 *  and will continue to use the old, broken implementation.
  39 *
  40 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
  41 *  with a file pointer (filp). As a result they can be shared by a parent
  42 *  process and its children after a fork(). They are removed when the last
  43 *  file descriptor referring to the file pointer is closed (unless explicitly
  44 *  unlocked). 
  45 *
  46 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
  47 *  upgrading from shared to exclusive (or vice versa). When this happens
  48 *  any processes blocked by the current lock are woken up and allowed to
  49 *  run before the new lock is applied.
  50 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
  51 *
  52 *  Removed some race conditions in flock_lock_file(), marked other possible
  53 *  races. Just grep for FIXME to see them. 
  54 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
  55 *
  56 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
  57 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
  58 *  once we've checked for blocking and deadlocking.
  59 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
  60 *
  61 *  Initial implementation of mandatory locks. SunOS turned out to be
  62 *  a rotten model, so I implemented the "obvious" semantics.
  63 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
  64 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
  65 *
  66 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
  67 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
  68 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
  69 *  Manual, Section 2.
  70 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
  71 *
  72 *  Tidied up block list handling. Added '/proc/locks' interface.
  73 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
  74 *
  75 *  Fixed deadlock condition for pathological code that mixes calls to
  76 *  flock() and fcntl().
  77 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
  78 *
  79 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
  80 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
  81 *  guarantee sensible behaviour in the case where file system modules might
  82 *  be compiled with different options than the kernel itself.
  83 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  84 *
  85 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
  86 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
  87 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  88 *
  89 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
  90 *  locks. Changed process synchronisation to avoid dereferencing locks that
  91 *  have already been freed.
  92 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
  93 *
  94 *  Made the block list a circular list to minimise searching in the list.
  95 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
  96 *
  97 *  Made mandatory locking a mount option. Default is not to allow mandatory
  98 *  locking.
  99 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 100 *
 101 *  Some adaptations for NFS support.
 102 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 103 *
 104 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 105 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 106 *
 107 *  Use slab allocator instead of kmalloc/kfree.
 108 *  Use generic list implementation from <linux/list.h>.
 109 *  Sped up posix_locks_deadlock by only considering blocked locks.
 110 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 111 *
 112 *  Leases and LOCK_MAND
 113 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 114 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 115 */
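
/*
 * For illustration only, a rough userspace sketch of the two personalities
 * described above (error handling omitted, "fd" is a hypothetical open
 * descriptor):
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,			// 0 means "to end of file"
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// FL_POSIX: owned by the process
 *
 *	flock(fd, LOCK_EX);		// FL_FLOCK: owned by the open file
 *
 * The fcntl() lock is dropped when this process closes any descriptor for
 * the file; the flock() lock is shared across fork()/dup() and only goes
 * away when the last descriptor referring to that open file is closed.
 */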
 116
 117#include <linux/capability.h>
 118#include <linux/file.h>
 119#include <linux/fdtable.h>
 120#include <linux/fs.h>
 121#include <linux/init.h>
 122#include <linux/module.h>
 123#include <linux/security.h>
 124#include <linux/slab.h>
 125#include <linux/syscalls.h>
 126#include <linux/time.h>
 127#include <linux/rcupdate.h>
 128#include <linux/pid_namespace.h>
 129#include <linux/hashtable.h>
 130#include <linux/percpu.h>
 131#include <linux/lglock.h>
 132
 133#define CREATE_TRACE_POINTS
 134#include <trace/events/filelock.h>
 135
 136#include <asm/uaccess.h>
 137
 138#define IS_POSIX(fl)    (fl->fl_flags & FL_POSIX)
 139#define IS_FLOCK(fl)    (fl->fl_flags & FL_FLOCK)
 140#define IS_LEASE(fl)    (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
 141#define IS_OFDLCK(fl)   (fl->fl_flags & FL_OFDLCK)
 142
 143static bool lease_breaking(struct file_lock *fl)
 144{
 145        return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
 146}
 147
 148static int target_leasetype(struct file_lock *fl)
 149{
 150        if (fl->fl_flags & FL_UNLOCK_PENDING)
 151                return F_UNLCK;
 152        if (fl->fl_flags & FL_DOWNGRADE_PENDING)
 153                return F_RDLCK;
 154        return fl->fl_type;
 155}
 156
 157int leases_enable = 1;
 158int lease_break_time = 45;
 159
 160/*
 161 * The global file_lock_list is only used for displaying /proc/locks, so we
 162 * keep a list on each CPU, with each list protected by its own spinlock via
 163 * the file_lock_lglock. Note that alterations to the list also require that
 164 * the relevant flc_lock is held.
 165 */
 166DEFINE_STATIC_LGLOCK(file_lock_lglock);
 167static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 168
 169/*
 170 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 171 * It is protected by blocked_lock_lock.
 172 *
 173 * We hash locks by lockowner in order to optimize searching for the lock a
 174 * particular lockowner is waiting on.
 175 *
 176 * FIXME: make this value scale via some heuristic? We generally will want more
 177 * buckets when we have more lockowners holding locks, but that's a little
 178 * difficult to determine without knowing what the workload will look like.
 179 */
 180#define BLOCKED_HASH_BITS       7
 181static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 182
 183/*
 184 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 185 * want to be holding this lock.
 186 *
 187 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 188 * pointer for file_lock structures that are acting as lock requests (in
 189 * contrast to those that are acting as records of acquired locks).
 190 *
 191 * Note that when we acquire this lock in order to change the above fields,
 192 * we often hold the flc_lock as well. In certain cases, when reading the fields
 193 * protected by this lock, we can skip acquiring it iff we already hold the
 194 * flc_lock.
 195 *
 196 * In particular, adding an entry to the fl_block list requires that you hold
 197 * both the flc_lock and the blocked_lock_lock (acquired in that order).
  198 *  Deleting an entry from the list however only requires the blocked_lock_lock.
 199 */
 200static DEFINE_SPINLOCK(blocked_lock_lock);
 201
 202static struct kmem_cache *flctx_cache __read_mostly;
 203static struct kmem_cache *filelock_cache __read_mostly;
 204
 205static struct file_lock_context *
 206locks_get_lock_context(struct inode *inode, int type)
 207{
 208        struct file_lock_context *new;
 209
 210        if (likely(inode->i_flctx) || type == F_UNLCK)
 211                goto out;
 212
 213        new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
 214        if (!new)
 215                goto out;
 216
 217        spin_lock_init(&new->flc_lock);
 218        INIT_LIST_HEAD(&new->flc_flock);
 219        INIT_LIST_HEAD(&new->flc_posix);
 220        INIT_LIST_HEAD(&new->flc_lease);
 221
 222        /*
 223         * Assign the pointer if it's not already assigned. If it is, then
 224         * free the context we just allocated.
 225         */
 226        if (cmpxchg(&inode->i_flctx, NULL, new))
 227                kmem_cache_free(flctx_cache, new);
 228out:
 229        return inode->i_flctx;
 230}
 231
 232void
 233locks_free_lock_context(struct file_lock_context *ctx)
 234{
 235        if (ctx) {
 236                WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
 237                WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
 238                WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
 239                kmem_cache_free(flctx_cache, ctx);
 240        }
 241}
 242
 243static void locks_init_lock_heads(struct file_lock *fl)
 244{
 245        INIT_HLIST_NODE(&fl->fl_link);
 246        INIT_LIST_HEAD(&fl->fl_list);
 247        INIT_LIST_HEAD(&fl->fl_block);
 248        init_waitqueue_head(&fl->fl_wait);
 249}
 250
 251/* Allocate an empty lock structure. */
 252struct file_lock *locks_alloc_lock(void)
 253{
 254        struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 255
 256        if (fl)
 257                locks_init_lock_heads(fl);
 258
 259        return fl;
 260}
 261EXPORT_SYMBOL_GPL(locks_alloc_lock);
 262
 263void locks_release_private(struct file_lock *fl)
 264{
 265        if (fl->fl_ops) {
 266                if (fl->fl_ops->fl_release_private)
 267                        fl->fl_ops->fl_release_private(fl);
 268                fl->fl_ops = NULL;
 269        }
 270
 271        if (fl->fl_lmops) {
 272                if (fl->fl_lmops->lm_put_owner) {
 273                        fl->fl_lmops->lm_put_owner(fl->fl_owner);
 274                        fl->fl_owner = NULL;
 275                }
 276                fl->fl_lmops = NULL;
 277        }
 278}
 279EXPORT_SYMBOL_GPL(locks_release_private);
 280
 281/* Free a lock which is not in use. */
 282void locks_free_lock(struct file_lock *fl)
 283{
 284        BUG_ON(waitqueue_active(&fl->fl_wait));
 285        BUG_ON(!list_empty(&fl->fl_list));
 286        BUG_ON(!list_empty(&fl->fl_block));
 287        BUG_ON(!hlist_unhashed(&fl->fl_link));
 288
 289        locks_release_private(fl);
 290        kmem_cache_free(filelock_cache, fl);
 291}
 292EXPORT_SYMBOL(locks_free_lock);
 293
 294static void
 295locks_dispose_list(struct list_head *dispose)
 296{
 297        struct file_lock *fl;
 298
 299        while (!list_empty(dispose)) {
 300                fl = list_first_entry(dispose, struct file_lock, fl_list);
 301                list_del_init(&fl->fl_list);
 302                locks_free_lock(fl);
 303        }
 304}
 305
 306void locks_init_lock(struct file_lock *fl)
 307{
 308        memset(fl, 0, sizeof(struct file_lock));
 309        locks_init_lock_heads(fl);
 310}
 311
 312EXPORT_SYMBOL(locks_init_lock);
 313
 314/*
 315 * Initialize a new lock from an existing file_lock structure.
 316 */
 317void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
 318{
 319        new->fl_owner = fl->fl_owner;
 320        new->fl_pid = fl->fl_pid;
 321        new->fl_file = NULL;
 322        new->fl_flags = fl->fl_flags;
 323        new->fl_type = fl->fl_type;
 324        new->fl_start = fl->fl_start;
 325        new->fl_end = fl->fl_end;
 326        new->fl_lmops = fl->fl_lmops;
 327        new->fl_ops = NULL;
 328
 329        if (fl->fl_lmops) {
 330                if (fl->fl_lmops->lm_get_owner)
 331                        fl->fl_lmops->lm_get_owner(fl->fl_owner);
 332        }
 333}
 334EXPORT_SYMBOL(locks_copy_conflock);
 335
 336void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 337{
 338        /* "new" must be a freshly-initialized lock */
 339        WARN_ON_ONCE(new->fl_ops);
 340
 341        locks_copy_conflock(new, fl);
 342
 343        new->fl_file = fl->fl_file;
 344        new->fl_ops = fl->fl_ops;
 345
 346        if (fl->fl_ops) {
 347                if (fl->fl_ops->fl_copy_lock)
 348                        fl->fl_ops->fl_copy_lock(new, fl);
 349        }
 350}
 351
 352EXPORT_SYMBOL(locks_copy_lock);
 353
 354static inline int flock_translate_cmd(int cmd) {
 355        if (cmd & LOCK_MAND)
 356                return cmd & (LOCK_MAND | LOCK_RW);
 357        switch (cmd) {
 358        case LOCK_SH:
 359                return F_RDLCK;
 360        case LOCK_EX:
 361                return F_WRLCK;
 362        case LOCK_UN:
 363                return F_UNLCK;
 364        }
 365        return -EINVAL;
 366}
 367
 368/* Fill in a file_lock structure with an appropriate FLOCK lock. */
 369static struct file_lock *
 370flock_make_lock(struct file *filp, unsigned int cmd)
 371{
 372        struct file_lock *fl;
 373        int type = flock_translate_cmd(cmd);
 374
 375        if (type < 0)
 376                return ERR_PTR(type);
 377        
 378        fl = locks_alloc_lock();
 379        if (fl == NULL)
 380                return ERR_PTR(-ENOMEM);
 381
 382        fl->fl_file = filp;
 383        fl->fl_owner = filp;
 384        fl->fl_pid = current->tgid;
 385        fl->fl_flags = FL_FLOCK;
 386        fl->fl_type = type;
 387        fl->fl_end = OFFSET_MAX;
 388        
 389        return fl;
 390}
 391
 392static int assign_type(struct file_lock *fl, long type)
 393{
 394        switch (type) {
 395        case F_RDLCK:
 396        case F_WRLCK:
 397        case F_UNLCK:
 398                fl->fl_type = type;
 399                break;
 400        default:
 401                return -EINVAL;
 402        }
 403        return 0;
 404}
 405
 406static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 407                                 struct flock64 *l)
 408{
 409        switch (l->l_whence) {
 410        case SEEK_SET:
 411                fl->fl_start = 0;
 412                break;
 413        case SEEK_CUR:
 414                fl->fl_start = filp->f_pos;
 415                break;
 416        case SEEK_END:
 417                fl->fl_start = i_size_read(file_inode(filp));
 418                break;
 419        default:
 420                return -EINVAL;
 421        }
 422        if (l->l_start > OFFSET_MAX - fl->fl_start)
 423                return -EOVERFLOW;
 424        fl->fl_start += l->l_start;
 425        if (fl->fl_start < 0)
 426                return -EINVAL;
 427
 428        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
 429           POSIX-2001 defines it. */
 430        if (l->l_len > 0) {
 431                if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
 432                        return -EOVERFLOW;
 433                fl->fl_end = fl->fl_start + l->l_len - 1;
 434
 435        } else if (l->l_len < 0) {
 436                if (fl->fl_start + l->l_len < 0)
 437                        return -EINVAL;
 438                fl->fl_end = fl->fl_start - 1;
 439                fl->fl_start += l->l_len;
 440        } else
 441                fl->fl_end = OFFSET_MAX;
 442
 443        fl->fl_owner = current->files;
 444        fl->fl_pid = current->tgid;
 445        fl->fl_file = filp;
 446        fl->fl_flags = FL_POSIX;
 447        fl->fl_ops = NULL;
 448        fl->fl_lmops = NULL;
 449
 450        return assign_type(fl, l->l_type);
 451}
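
/*
 * Worked example of the conversion above (hypothetical values):
 *
 *	l_whence = SEEK_SET, l_start = 100, l_len = -50
 *
 * sets fl_start to 100, then takes the negative-length branch: fl_end
 * becomes fl_start - 1 == 99 and fl_start is moved back by 50 to 50, so
 * the lock covers bytes 50..99.  With l_len == 0 the range instead runs
 * from l_start to OFFSET_MAX, i.e. to the end of the file.
 */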
 452
 453/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 454 * style lock.
 455 */
 456static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 457                               struct flock *l)
 458{
 459        struct flock64 ll = {
 460                .l_type = l->l_type,
 461                .l_whence = l->l_whence,
 462                .l_start = l->l_start,
 463                .l_len = l->l_len,
 464        };
 465
 466        return flock64_to_posix_lock(filp, fl, &ll);
 467}
 468
 469/* default lease lock manager operations */
 470static bool
 471lease_break_callback(struct file_lock *fl)
 472{
 473        kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 474        return false;
 475}
 476
 477static void
 478lease_setup(struct file_lock *fl, void **priv)
 479{
 480        struct file *filp = fl->fl_file;
 481        struct fasync_struct *fa = *priv;
 482
 483        /*
 484         * fasync_insert_entry() returns the old entry if any. If there was no
 485         * old entry, then it used "priv" and inserted it into the fasync list.
 486         * Clear the pointer to indicate that it shouldn't be freed.
 487         */
 488        if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
 489                *priv = NULL;
 490
 491        __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 492}
 493
 494static const struct lock_manager_operations lease_manager_ops = {
 495        .lm_break = lease_break_callback,
 496        .lm_change = lease_modify,
 497        .lm_setup = lease_setup,
 498};
 499
 500/*
 501 * Initialize a lease, use the default lock manager operations
 502 */
 503static int lease_init(struct file *filp, long type, struct file_lock *fl)
 504 {
 505        if (assign_type(fl, type) != 0)
 506                return -EINVAL;
 507
 508        fl->fl_owner = filp;
 509        fl->fl_pid = current->tgid;
 510
 511        fl->fl_file = filp;
 512        fl->fl_flags = FL_LEASE;
 513        fl->fl_start = 0;
 514        fl->fl_end = OFFSET_MAX;
 515        fl->fl_ops = NULL;
 516        fl->fl_lmops = &lease_manager_ops;
 517        return 0;
 518}
 519
 520/* Allocate a file_lock initialised to this type of lease */
 521static struct file_lock *lease_alloc(struct file *filp, long type)
 522{
 523        struct file_lock *fl = locks_alloc_lock();
 524        int error = -ENOMEM;
 525
 526        if (fl == NULL)
 527                return ERR_PTR(error);
 528
 529        error = lease_init(filp, type, fl);
 530        if (error) {
 531                locks_free_lock(fl);
 532                return ERR_PTR(error);
 533        }
 534        return fl;
 535}
 536
 537/* Check if two locks overlap each other.
 538 */
 539static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
 540{
 541        return ((fl1->fl_end >= fl2->fl_start) &&
 542                (fl2->fl_end >= fl1->fl_start));
 543}
 544
 545/*
 546 * Check whether two locks have the same owner.
 547 */
 548static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 549{
 550        if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
 551                return fl2->fl_lmops == fl1->fl_lmops &&
 552                        fl1->fl_lmops->lm_compare_owner(fl1, fl2);
 553        return fl1->fl_owner == fl2->fl_owner;
 554}
 555
 556/* Must be called with the flc_lock held! */
 557static void locks_insert_global_locks(struct file_lock *fl)
 558{
 559        lg_local_lock(&file_lock_lglock);
 560        fl->fl_link_cpu = smp_processor_id();
 561        hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
 562        lg_local_unlock(&file_lock_lglock);
 563}
 564
 565/* Must be called with the flc_lock held! */
 566static void locks_delete_global_locks(struct file_lock *fl)
 567{
 568        /*
 569         * Avoid taking lock if already unhashed. This is safe since this check
 570         * is done while holding the flc_lock, and new insertions into the list
 571         * also require that it be held.
 572         */
 573        if (hlist_unhashed(&fl->fl_link))
 574                return;
 575        lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 576        hlist_del_init(&fl->fl_link);
 577        lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 578}
 579
 580static unsigned long
 581posix_owner_key(struct file_lock *fl)
 582{
 583        if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
 584                return fl->fl_lmops->lm_owner_key(fl);
 585        return (unsigned long)fl->fl_owner;
 586}
 587
 588static void locks_insert_global_blocked(struct file_lock *waiter)
 589{
 590        lockdep_assert_held(&blocked_lock_lock);
 591
 592        hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
 593}
 594
 595static void locks_delete_global_blocked(struct file_lock *waiter)
 596{
 597        lockdep_assert_held(&blocked_lock_lock);
 598
 599        hash_del(&waiter->fl_link);
 600}
 601
 602/* Remove waiter from blocker's block list.
 603 * When blocker ends up pointing to itself then the list is empty.
 604 *
 605 * Must be called with blocked_lock_lock held.
 606 */
 607static void __locks_delete_block(struct file_lock *waiter)
 608{
 609        locks_delete_global_blocked(waiter);
 610        list_del_init(&waiter->fl_block);
 611        waiter->fl_next = NULL;
 612}
 613
 614static void locks_delete_block(struct file_lock *waiter)
 615{
 616        spin_lock(&blocked_lock_lock);
 617        __locks_delete_block(waiter);
 618        spin_unlock(&blocked_lock_lock);
 619}
 620
 621/* Insert waiter into blocker's block list.
 622 * We use a circular list so that processes can be easily woken up in
 623 * the order they blocked. The documentation doesn't require this but
 624 * it seems like the reasonable thing to do.
 625 *
 626 * Must be called with both the flc_lock and blocked_lock_lock held. The
 627 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 628 * that the flc_lock is also held on insertions we can avoid taking the
 629 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 630 */
 631static void __locks_insert_block(struct file_lock *blocker,
 632                                        struct file_lock *waiter)
 633{
 634        BUG_ON(!list_empty(&waiter->fl_block));
 635        waiter->fl_next = blocker;
 636        list_add_tail(&waiter->fl_block, &blocker->fl_block);
 637        if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
 638                locks_insert_global_blocked(waiter);
 639}
 640
 641/* Must be called with flc_lock held. */
 642static void locks_insert_block(struct file_lock *blocker,
 643                                        struct file_lock *waiter)
 644{
 645        spin_lock(&blocked_lock_lock);
 646        __locks_insert_block(blocker, waiter);
 647        spin_unlock(&blocked_lock_lock);
 648}
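
/*
 * The resulting locking pattern for queueing a waiter is thus (sketch
 * only, mirroring the comments above):
 *
 *	spin_lock(&ctx->flc_lock);
 *	...
 *	locks_insert_block(blocker, waiter);	// takes blocked_lock_lock
 *	...
 *	spin_unlock(&ctx->flc_lock);
 *
 * which preserves the flc_lock -> blocked_lock_lock ordering, while
 * removal via locks_delete_block() needs only the blocked_lock_lock.
 */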
 649
 650/*
 651 * Wake up processes blocked waiting for blocker.
 652 *
 653 * Must be called with the inode->flc_lock held!
 654 */
 655static void locks_wake_up_blocks(struct file_lock *blocker)
 656{
 657        /*
 658         * Avoid taking global lock if list is empty. This is safe since new
 659         * blocked requests are only added to the list under the flc_lock, and
 660         * the flc_lock is always held here. Note that removal from the fl_block
 661         * list does not require the flc_lock, so we must recheck list_empty()
 662         * after acquiring the blocked_lock_lock.
 663         */
 664        if (list_empty(&blocker->fl_block))
 665                return;
 666
 667        spin_lock(&blocked_lock_lock);
 668        while (!list_empty(&blocker->fl_block)) {
 669                struct file_lock *waiter;
 670
 671                waiter = list_first_entry(&blocker->fl_block,
 672                                struct file_lock, fl_block);
 673                __locks_delete_block(waiter);
 674                if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 675                        waiter->fl_lmops->lm_notify(waiter);
 676                else
 677                        wake_up(&waiter->fl_wait);
 678        }
 679        spin_unlock(&blocked_lock_lock);
 680}
 681
 682static void
 683locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
 684{
 685        fl->fl_nspid = get_pid(task_tgid(current));
 686        list_add_tail(&fl->fl_list, before);
 687        locks_insert_global_locks(fl);
 688}
 689
 690static void
 691locks_unlink_lock_ctx(struct file_lock *fl)
 692{
 693        locks_delete_global_locks(fl);
 694        list_del_init(&fl->fl_list);
 695        if (fl->fl_nspid) {
 696                put_pid(fl->fl_nspid);
 697                fl->fl_nspid = NULL;
 698        }
 699        locks_wake_up_blocks(fl);
 700}
 701
 702static void
 703locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
 704{
 705        locks_unlink_lock_ctx(fl);
 706        if (dispose)
 707                list_add(&fl->fl_list, dispose);
 708        else
 709                locks_free_lock(fl);
 710}
 711
 712/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 713 * checks for shared/exclusive status of overlapping locks.
 714 */
 715static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 716{
 717        if (sys_fl->fl_type == F_WRLCK)
 718                return 1;
 719        if (caller_fl->fl_type == F_WRLCK)
 720                return 1;
 721        return 0;
 722}
 723
 724/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 725 * checking before calling the locks_conflict().
 726 */
 727static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 728{
 729        /* POSIX locks owned by the same process do not conflict with
 730         * each other.
 731         */
 732        if (posix_same_owner(caller_fl, sys_fl))
 733                return (0);
 734
 735        /* Check whether they overlap */
 736        if (!locks_overlap(caller_fl, sys_fl))
 737                return 0;
 738
 739        return (locks_conflict(caller_fl, sys_fl));
 740}
 741
 742/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 743 * checking before calling the locks_conflict().
 744 */
 745static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 746{
 747        /* FLOCK locks referring to the same filp do not conflict with
 748         * each other.
 749         */
 750        if (caller_fl->fl_file == sys_fl->fl_file)
 751                return (0);
 752        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
 753                return 0;
 754
 755        return (locks_conflict(caller_fl, sys_fl));
 756}
 757
 758void
 759posix_test_lock(struct file *filp, struct file_lock *fl)
 760{
 761        struct file_lock *cfl;
 762        struct file_lock_context *ctx;
 763        struct inode *inode = file_inode(filp);
 764
 765        ctx = inode->i_flctx;
 766        if (!ctx || list_empty_careful(&ctx->flc_posix)) {
 767                fl->fl_type = F_UNLCK;
 768                return;
 769        }
 770
 771        spin_lock(&ctx->flc_lock);
 772        list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
 773                if (posix_locks_conflict(fl, cfl)) {
 774                        locks_copy_conflock(fl, cfl);
 775                        if (cfl->fl_nspid)
 776                                fl->fl_pid = pid_vnr(cfl->fl_nspid);
 777                        goto out;
 778                }
 779        }
 780        fl->fl_type = F_UNLCK;
 781out:
 782        spin_unlock(&ctx->flc_lock);
 783        return;
 784}
 785EXPORT_SYMBOL(posix_test_lock);
 786
 787/*
 788 * Deadlock detection:
 789 *
 790 * We attempt to detect deadlocks that are due purely to posix file
 791 * locks.
 792 *
 793 * We assume that a task can be waiting for at most one lock at a time.
 794 * So for any acquired lock, the process holding that lock may be
 795 * waiting on at most one other lock.  That lock in turns may be held by
 796 * someone waiting for at most one other lock.  Given a requested lock
 797 * caller_fl which is about to wait for a conflicting lock block_fl, we
 798 * follow this chain of waiters to ensure we are not about to create a
 799 * cycle.
 800 *
 801 * Since we do this before we ever put a process to sleep on a lock, we
 802 * are ensured that there is never a cycle; that is what guarantees that
 803 * the while() loop in posix_locks_deadlock() eventually completes.
 804 *
 805 * Note: the above assumption may not be true when handling lock
 806 * requests from a broken NFS client. It may also fail in the presence
 807 * of tasks (such as posix threads) sharing the same open file table.
 808 * To handle those cases, we just bail out after a few iterations.
 809 *
 810 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 811 * Because the owner is not even nominally tied to a thread of
 812 * execution, the deadlock detection below can't reasonably work well. Just
 813 * skip it for those.
 814 *
 815 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 816 * locks that just checks for the case where two tasks are attempting to
 817 * upgrade from read to write locks on the same inode.
 818 */
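
/*
 * A minimal (hypothetical) scenario the checks below catch: tasks A and B
 * each hold a one-byte write lock and then request the other's byte:
 *
 *	A: locks byte 0			B: locks byte 1
 *	A: requests byte 1 -> blocks on B
 *					B: requests byte 0 -> would block on A
 *
 * Following B's request through the blocked_hash reaches a lock owned by
 * B itself, so posix_locks_deadlock() reports a deadlock and the second
 * request fails with -EDEADLK instead of sleeping forever.
 */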
 819
 820#define MAX_DEADLK_ITERATIONS 10
 821
 822/* Find a lock that the owner of the given block_fl is blocking on. */
 823static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
 824{
 825        struct file_lock *fl;
 826
 827        hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
 828                if (posix_same_owner(fl, block_fl))
 829                        return fl->fl_next;
 830        }
 831        return NULL;
 832}
 833
 834/* Must be called with the blocked_lock_lock held! */
 835static int posix_locks_deadlock(struct file_lock *caller_fl,
 836                                struct file_lock *block_fl)
 837{
 838        int i = 0;
 839
 840        lockdep_assert_held(&blocked_lock_lock);
 841
 842        /*
 843         * This deadlock detector can't reasonably detect deadlocks with
 844         * FL_OFDLCK locks, since they aren't owned by a process, per-se.
 845         */
 846        if (IS_OFDLCK(caller_fl))
 847                return 0;
 848
 849        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
 850                if (i++ > MAX_DEADLK_ITERATIONS)
 851                        return 0;
 852                if (posix_same_owner(caller_fl, block_fl))
 853                        return 1;
 854        }
 855        return 0;
 856}
 857
 858/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 859 * after any leases, but before any posix locks.
 860 *
 861 * Note that if called with an FL_EXISTS argument, the caller may determine
 862 * whether or not a lock was successfully freed by testing the return
 863 * value for -ENOENT.
 864 */
 865static int flock_lock_file(struct file *filp, struct file_lock *request)
 866{
 867        struct file_lock *new_fl = NULL;
 868        struct file_lock *fl;
 869        struct file_lock_context *ctx;
 870        struct inode *inode = file_inode(filp);
 871        int error = 0;
 872        bool found = false;
 873        LIST_HEAD(dispose);
 874
 875        ctx = locks_get_lock_context(inode, request->fl_type);
 876        if (!ctx) {
 877                if (request->fl_type != F_UNLCK)
 878                        return -ENOMEM;
 879                return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
 880        }
 881
 882        if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
 883                new_fl = locks_alloc_lock();
 884                if (!new_fl)
 885                        return -ENOMEM;
 886        }
 887
 888        spin_lock(&ctx->flc_lock);
 889        if (request->fl_flags & FL_ACCESS)
 890                goto find_conflict;
 891
 892        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
 893                if (filp != fl->fl_file)
 894                        continue;
 895                if (request->fl_type == fl->fl_type)
 896                        goto out;
 897                found = true;
 898                locks_delete_lock_ctx(fl, &dispose);
 899                break;
 900        }
 901
 902        if (request->fl_type == F_UNLCK) {
 903                if ((request->fl_flags & FL_EXISTS) && !found)
 904                        error = -ENOENT;
 905                goto out;
 906        }
 907
 908find_conflict:
 909        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
 910                if (!flock_locks_conflict(request, fl))
 911                        continue;
 912                error = -EAGAIN;
 913                if (!(request->fl_flags & FL_SLEEP))
 914                        goto out;
 915                error = FILE_LOCK_DEFERRED;
 916                locks_insert_block(fl, request);
 917                goto out;
 918        }
 919        if (request->fl_flags & FL_ACCESS)
 920                goto out;
 921        locks_copy_lock(new_fl, request);
 922        locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
 923        new_fl = NULL;
 924        error = 0;
 925
 926out:
 927        spin_unlock(&ctx->flc_lock);
 928        if (new_fl)
 929                locks_free_lock(new_fl);
 930        locks_dispose_list(&dispose);
 931        return error;
 932}
 933
 934static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 935{
 936        struct file_lock *fl, *tmp;
 937        struct file_lock *new_fl = NULL;
 938        struct file_lock *new_fl2 = NULL;
 939        struct file_lock *left = NULL;
 940        struct file_lock *right = NULL;
 941        struct file_lock_context *ctx;
 942        int error;
 943        bool added = false;
 944        LIST_HEAD(dispose);
 945
 946        ctx = locks_get_lock_context(inode, request->fl_type);
 947        if (!ctx)
 948                return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
 949
 950        /*
 951         * We may need two file_lock structures for this operation,
 952         * so we get them in advance to avoid races.
 953         *
  954         * In some cases we can be sure that no new locks will be needed
 955         */
 956        if (!(request->fl_flags & FL_ACCESS) &&
 957            (request->fl_type != F_UNLCK ||
 958             request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
 959                new_fl = locks_alloc_lock();
 960                new_fl2 = locks_alloc_lock();
 961        }
 962
 963        spin_lock(&ctx->flc_lock);
 964        /*
 965         * New lock request. Walk all POSIX locks and look for conflicts. If
 966         * there are any, either return error or put the request on the
 967         * blocker's list of waiters and the global blocked_hash.
 968         */
 969        if (request->fl_type != F_UNLCK) {
 970                list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
 971                        if (!posix_locks_conflict(request, fl))
 972                                continue;
 973                        if (conflock)
 974                                locks_copy_conflock(conflock, fl);
 975                        error = -EAGAIN;
 976                        if (!(request->fl_flags & FL_SLEEP))
 977                                goto out;
 978                        /*
 979                         * Deadlock detection and insertion into the blocked
 980                         * locks list must be done while holding the same lock!
 981                         */
 982                        error = -EDEADLK;
 983                        spin_lock(&blocked_lock_lock);
 984                        if (likely(!posix_locks_deadlock(request, fl))) {
 985                                error = FILE_LOCK_DEFERRED;
 986                                __locks_insert_block(fl, request);
 987                        }
 988                        spin_unlock(&blocked_lock_lock);
 989                        goto out;
 990                }
 991        }
 992
 993        /* If we're just looking for a conflict, we're done. */
 994        error = 0;
 995        if (request->fl_flags & FL_ACCESS)
 996                goto out;
 997
 998        /* Find the first old lock with the same owner as the new lock */
 999        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1000                if (posix_same_owner(request, fl))
1001                        break;
1002        }
1003
1004        /* Process locks with this owner. */
1005        list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1006                if (!posix_same_owner(request, fl))
1007                        break;
1008
1009                /* Detect adjacent or overlapping regions (if same lock type) */
1010                if (request->fl_type == fl->fl_type) {
1011                        /* In all comparisons of start vs end, use
1012                         * "start - 1" rather than "end + 1". If end
1013                         * is OFFSET_MAX, end + 1 will become negative.
1014                         */
1015                        if (fl->fl_end < request->fl_start - 1)
1016                                continue;
1017                        /* If the next lock in the list has entirely bigger
1018                         * addresses than the new one, insert the lock here.
1019                         */
1020                        if (fl->fl_start - 1 > request->fl_end)
1021                                break;
1022
1023                        /* If we come here, the new and old lock are of the
1024                         * same type and adjacent or overlapping. Make one
1025                         * lock yielding from the lower start address of both
1026                         * locks to the higher end address.
1027                         */
1028                        if (fl->fl_start > request->fl_start)
1029                                fl->fl_start = request->fl_start;
1030                        else
1031                                request->fl_start = fl->fl_start;
1032                        if (fl->fl_end < request->fl_end)
1033                                fl->fl_end = request->fl_end;
1034                        else
1035                                request->fl_end = fl->fl_end;
1036                        if (added) {
1037                                locks_delete_lock_ctx(fl, &dispose);
1038                                continue;
1039                        }
1040                        request = fl;
1041                        added = true;
1042                } else {
1043                        /* Processing for different lock types is a bit
1044                         * more complex.
1045                         */
1046                        if (fl->fl_end < request->fl_start)
1047                                continue;
1048                        if (fl->fl_start > request->fl_end)
1049                                break;
1050                        if (request->fl_type == F_UNLCK)
1051                                added = true;
1052                        if (fl->fl_start < request->fl_start)
1053                                left = fl;
1054                        /* If the next lock in the list has a higher end
1055                         * address than the new one, insert the new one here.
1056                         */
1057                        if (fl->fl_end > request->fl_end) {
1058                                right = fl;
1059                                break;
1060                        }
1061                        if (fl->fl_start >= request->fl_start) {
1062                                /* The new lock completely replaces an old
1063                                 * one (This may happen several times).
1064                                 */
1065                                if (added) {
1066                                        locks_delete_lock_ctx(fl, &dispose);
1067                                        continue;
1068                                }
1069                                /*
1070                                 * Replace the old lock with new_fl, and
1071                                 * remove the old one. It's safe to do the
1072                                 * insert here since we know that we won't be
1073                                 * using new_fl later, and that the lock is
1074                                 * just replacing an existing lock.
1075                                 */
1076                                error = -ENOLCK;
1077                                if (!new_fl)
1078                                        goto out;
1079                                locks_copy_lock(new_fl, request);
1080                                request = new_fl;
1081                                new_fl = NULL;
1082                                locks_insert_lock_ctx(request, &fl->fl_list);
1083                                locks_delete_lock_ctx(fl, &dispose);
1084                                added = true;
1085                        }
1086                }
1087        }
1088
1089        /*
1090         * The above code only modifies existing locks in case of merging or
1091         * replacing. If new lock(s) need to be inserted all modifications are
1092         * done below this, so it's safe yet to bail out.
1093         */
1094        error = -ENOLCK; /* "no luck" */
1095        if (right && left == right && !new_fl2)
1096                goto out;
1097
1098        error = 0;
1099        if (!added) {
1100                if (request->fl_type == F_UNLCK) {
1101                        if (request->fl_flags & FL_EXISTS)
1102                                error = -ENOENT;
1103                        goto out;
1104                }
1105
1106                if (!new_fl) {
1107                        error = -ENOLCK;
1108                        goto out;
1109                }
1110                locks_copy_lock(new_fl, request);
1111                locks_insert_lock_ctx(new_fl, &fl->fl_list);
1112                fl = new_fl;
1113                new_fl = NULL;
1114        }
1115        if (right) {
1116                if (left == right) {
1117                        /* The new lock breaks the old one in two pieces,
1118                         * so we have to use the second new lock.
1119                         */
1120                        left = new_fl2;
1121                        new_fl2 = NULL;
1122                        locks_copy_lock(left, right);
1123                        locks_insert_lock_ctx(left, &fl->fl_list);
1124                }
1125                right->fl_start = request->fl_end + 1;
1126                locks_wake_up_blocks(right);
1127        }
1128        if (left) {
1129                left->fl_end = request->fl_start - 1;
1130                locks_wake_up_blocks(left);
1131        }
1132 out:
1133        spin_unlock(&ctx->flc_lock);
1134        /*
1135         * Free any unused locks.
1136         */
1137        if (new_fl)
1138                locks_free_lock(new_fl);
1139        if (new_fl2)
1140                locks_free_lock(new_fl2);
1141        locks_dispose_list(&dispose);
1142        return error;
1143}
1144
1145/**
1146 * posix_lock_file - Apply a POSIX-style lock to a file
1147 * @filp: The file to apply the lock to
1148 * @fl: The lock to be applied
1149 * @conflock: Place to return a copy of the conflicting lock, if found.
1150 *
1151 * Add a POSIX style lock to a file.
1152 * We merge adjacent & overlapping locks whenever possible.
1153 * POSIX locks are sorted by owner task, then by starting address
1154 *
1155 * Note that if called with an FL_EXISTS argument, the caller may determine
1156 * whether or not a lock was successfully freed by testing the return
1157 * value for -ENOENT.
1158 */
1159int posix_lock_file(struct file *filp, struct file_lock *fl,
1160                        struct file_lock *conflock)
1161{
1162        return __posix_lock_file(file_inode(filp), fl, conflock);
1163}
1164EXPORT_SYMBOL(posix_lock_file);
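
/*
 * Worked example of the splitting logic above (hypothetical ranges): if a
 * process holds a write lock on bytes 0-99 and then requests F_UNLCK on
 * bytes 40-59, the existing lock becomes the "left" piece and is trimmed
 * to 0-39, while the pre-allocated second lock (new_fl2) becomes the
 * "right" piece covering 60-99.  Waiters on both pieces are woken so they
 * can re-evaluate against the new ranges.
 */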
1165
1166/**
1167 * posix_lock_file_wait - Apply a POSIX-style lock to a file
1168 * @filp: The file to apply the lock to
1169 * @fl: The lock to be applied
1170 *
1171 * Add a POSIX style lock to a file.
1172 * We merge adjacent & overlapping locks whenever possible.
1173 * POSIX locks are sorted by owner task, then by starting address
1174 */
1175int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1176{
1177        int error;
 1178        might_sleep();
1179        for (;;) {
1180                error = posix_lock_file(filp, fl, NULL);
1181                if (error != FILE_LOCK_DEFERRED)
1182                        break;
1183                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1184                if (!error)
1185                        continue;
1186
1187                locks_delete_block(fl);
1188                break;
1189        }
1190        return error;
1191}
1192EXPORT_SYMBOL(posix_lock_file_wait);
1193
1194/**
1195 * locks_mandatory_locked - Check for an active lock
1196 * @file: the file to check
1197 *
1198 * Searches the inode's list of locks to find any POSIX locks which conflict.
1199 * This function is called from locks_verify_locked() only.
1200 */
1201int locks_mandatory_locked(struct file *file)
1202{
1203        int ret;
1204        struct inode *inode = file_inode(file);
1205        struct file_lock_context *ctx;
1206        struct file_lock *fl;
1207
1208        ctx = inode->i_flctx;
1209        if (!ctx || list_empty_careful(&ctx->flc_posix))
1210                return 0;
1211
1212        /*
1213         * Search the lock list for this inode for any POSIX locks.
1214         */
1215        spin_lock(&ctx->flc_lock);
1216        ret = 0;
1217        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1218                if (fl->fl_owner != current->files &&
1219                    fl->fl_owner != file) {
1220                        ret = -EAGAIN;
1221                        break;
1222                }
1223        }
1224        spin_unlock(&ctx->flc_lock);
1225        return ret;
1226}
1227
1228/**
1229 * locks_mandatory_area - Check for a conflicting lock
1230 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1231 *              for shared
1232 * @inode:      the file to check
1233 * @filp:       how the file was opened (if it was)
1234 * @offset:     start of area to check
1235 * @count:      length of area to check
1236 *
1237 * Searches the inode's list of locks to find any POSIX locks which conflict.
1238 * This function is called from rw_verify_area() and
1239 * locks_verify_truncate().
1240 */
1241int locks_mandatory_area(int read_write, struct inode *inode,
1242                         struct file *filp, loff_t offset,
1243                         size_t count)
1244{
1245        struct file_lock fl;
1246        int error;
1247        bool sleep = false;
1248
1249        locks_init_lock(&fl);
1250        fl.fl_pid = current->tgid;
1251        fl.fl_file = filp;
1252        fl.fl_flags = FL_POSIX | FL_ACCESS;
1253        if (filp && !(filp->f_flags & O_NONBLOCK))
1254                sleep = true;
1255        fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1256        fl.fl_start = offset;
1257        fl.fl_end = offset + count - 1;
1258
1259        for (;;) {
1260                if (filp) {
1261                        fl.fl_owner = filp;
1262                        fl.fl_flags &= ~FL_SLEEP;
1263                        error = __posix_lock_file(inode, &fl, NULL);
1264                        if (!error)
1265                                break;
1266                }
1267
1268                if (sleep)
1269                        fl.fl_flags |= FL_SLEEP;
1270                fl.fl_owner = current->files;
1271                error = __posix_lock_file(inode, &fl, NULL);
1272                if (error != FILE_LOCK_DEFERRED)
1273                        break;
1274                error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1275                if (!error) {
1276                        /*
1277                         * If we've been sleeping someone might have
1278                         * changed the permissions behind our back.
1279                         */
1280                        if (__mandatory_lock(inode))
1281                                continue;
1282                }
1283
1284                locks_delete_block(&fl);
1285                break;
1286        }
1287
1288        return error;
1289}
1290
1291EXPORT_SYMBOL(locks_mandatory_area);
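
/*
 * Note that the checks above only come into play for files that actually
 * have mandatory locking in effect: the callers (locks_verify_locked(),
 * rw_verify_area(), locks_verify_truncate()) first test for roughly
 *
 *	(inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID
 *
 * (setgid set, group-execute clear) on a mount with mandatory locking
 * enabled; see __mandatory_lock() and
 * Documentation/filesystems/mandatory-locking.txt for the details.
 */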
1292
1293static void lease_clear_pending(struct file_lock *fl, int arg)
1294{
1295        switch (arg) {
1296        case F_UNLCK:
1297                fl->fl_flags &= ~FL_UNLOCK_PENDING;
1298                /* fall through: */
1299        case F_RDLCK:
1300                fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1301        }
1302}
1303
1304/* We already had a lease on this file; just change its type */
1305int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1306{
1307        int error = assign_type(fl, arg);
1308
1309        if (error)
1310                return error;
1311        lease_clear_pending(fl, arg);
1312        locks_wake_up_blocks(fl);
1313        if (arg == F_UNLCK) {
1314                struct file *filp = fl->fl_file;
1315
1316                f_delown(filp);
1317                filp->f_owner.signum = 0;
1318                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1319                if (fl->fl_fasync != NULL) {
1320                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1321                        fl->fl_fasync = NULL;
1322                }
1323                locks_delete_lock_ctx(fl, dispose);
1324        }
1325        return 0;
1326}
1327EXPORT_SYMBOL(lease_modify);
1328
1329static bool past_time(unsigned long then)
1330{
1331        if (!then)
1332                /* 0 is a special value meaning "this never expires": */
1333                return false;
1334        return time_after(jiffies, then);
1335}
1336
1337static void time_out_leases(struct inode *inode, struct list_head *dispose)
1338{
1339        struct file_lock_context *ctx = inode->i_flctx;
1340        struct file_lock *fl, *tmp;
1341
1342        lockdep_assert_held(&ctx->flc_lock);
1343
1344        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1345                trace_time_out_leases(inode, fl);
1346                if (past_time(fl->fl_downgrade_time))
1347                        lease_modify(fl, F_RDLCK, dispose);
1348                if (past_time(fl->fl_break_time))
1349                        lease_modify(fl, F_UNLCK, dispose);
1350        }
1351}
1352
1353static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1354{
1355        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1356                return false;
1357        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1358                return false;
1359        return locks_conflict(breaker, lease);
1360}
1361
1362static bool
1363any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1364{
1365        struct file_lock_context *ctx = inode->i_flctx;
1366        struct file_lock *fl;
1367
1368        lockdep_assert_held(&ctx->flc_lock);
1369
1370        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1371                if (leases_conflict(fl, breaker))
1372                        return true;
1373        }
1374        return false;
1375}
1376
1377/**
1378 *      __break_lease   -       revoke all outstanding leases on file
1379 *      @inode: the inode of the file to return
1380 *      @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1381 *          break all leases
1382 *      @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1383 *          only delegations
1384 *
1385 *      break_lease (inlined for speed) has checked there already is at least
1386 *      some kind of lock (maybe a lease) on this file.  Leases are broken on
1387 *      a call to open() or truncate().  This function can sleep unless you
1388 *      specified %O_NONBLOCK to your open().
1389 */
1390int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1391{
1392        int error = 0;
1393        struct file_lock_context *ctx = inode->i_flctx;
1394        struct file_lock *new_fl, *fl, *tmp;
1395        unsigned long break_time;
1396        int want_write = (mode & O_ACCMODE) != O_RDONLY;
1397        LIST_HEAD(dispose);
1398
1399        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1400        if (IS_ERR(new_fl))
1401                return PTR_ERR(new_fl);
1402        new_fl->fl_flags = type;
1403
1404        /* typically we will check that ctx is non-NULL before calling */
1405        if (!ctx) {
 1406                WARN_ON_ONCE(1);
                     locks_free_lock(new_fl);
 1407                return error;
1408        }
1409
1410        spin_lock(&ctx->flc_lock);
1411
1412        time_out_leases(inode, &dispose);
1413
1414        if (!any_leases_conflict(inode, new_fl))
1415                goto out;
1416
1417        break_time = 0;
1418        if (lease_break_time > 0) {
1419                break_time = jiffies + lease_break_time * HZ;
1420                if (break_time == 0)
1421                        break_time++;   /* so that 0 means no break time */
1422        }
1423
1424        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1425                if (!leases_conflict(fl, new_fl))
1426                        continue;
1427                if (want_write) {
1428                        if (fl->fl_flags & FL_UNLOCK_PENDING)
1429                                continue;
1430                        fl->fl_flags |= FL_UNLOCK_PENDING;
1431                        fl->fl_break_time = break_time;
1432                } else {
1433                        if (lease_breaking(fl))
1434                                continue;
1435                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
1436                        fl->fl_downgrade_time = break_time;
1437                }
1438                if (fl->fl_lmops->lm_break(fl))
1439                        locks_delete_lock_ctx(fl, &dispose);
1440        }
1441
1442        if (list_empty(&ctx->flc_lease))
1443                goto out;
1444
1445        if (mode & O_NONBLOCK) {
1446                trace_break_lease_noblock(inode, new_fl);
1447                error = -EWOULDBLOCK;
1448                goto out;
1449        }
1450
1451restart:
1452        fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1453        break_time = fl->fl_break_time;
1454        if (break_time != 0)
1455                break_time -= jiffies;
1456        if (break_time == 0)
1457                break_time++;
1458        locks_insert_block(fl, new_fl);
1459        trace_break_lease_block(inode, new_fl);
1460        spin_unlock(&ctx->flc_lock);
1461        locks_dispose_list(&dispose);
1462        error = wait_event_interruptible_timeout(new_fl->fl_wait,
1463                                                !new_fl->fl_next, break_time);
1464        spin_lock(&ctx->flc_lock);
1465        trace_break_lease_unblock(inode, new_fl);
1466        locks_delete_block(new_fl);
1467        if (error >= 0) {
1468                /*
1469                 * Wait for the next conflicting lease that has not been
1470                 * broken yet
1471                 */
1472                if (error == 0)
1473                        time_out_leases(inode, &dispose);
1474                if (any_leases_conflict(inode, new_fl))
1475                        goto restart;
1476                error = 0;
1477        }
1478out:
1479        spin_unlock(&ctx->flc_lock);
1480        locks_dispose_list(&dispose);
1481        locks_free_lock(new_fl);
1482        return error;
1483}
1484
1485EXPORT_SYMBOL(__break_lease);
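
/*
 * Illustrative only (editor's hedged sketch, not part of this file): callers
 * on the open() and truncate() paths normally reach __break_lease() through
 * the break_lease()/break_deleg() wrappers in <linux/fs.h>, roughly:
 *
 *	error = break_lease(inode, file->f_flags);
 *	if (error)		// -EWOULDBLOCK when O_NONBLOCK was requested
 *		return error;
 */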
1486
1487/**
1488 *      lease_get_mtime - get the last modified time of an inode
1489 *      @inode: the inode
1490 *      @time:  pointer to a timespec which will contain the last modified time
1491 *
1492 * This is to force NFS clients to flush their caches for files with
1493 * exclusive leases.  The justification is that if someone has an
1494 * exclusive lease, then they could be modifying it.
1495 */
1496void lease_get_mtime(struct inode *inode, struct timespec *time)
1497{
1498        bool has_lease = false;
1499        struct file_lock_context *ctx = inode->i_flctx;
1500        struct file_lock *fl;
1501
1502        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1503                spin_lock(&ctx->flc_lock);
1504                if (!list_empty(&ctx->flc_lease)) {
1505                        fl = list_first_entry(&ctx->flc_lease,
1506                                                struct file_lock, fl_list);
1507                        if (fl->fl_type == F_WRLCK)
1508                                has_lease = true;
1509                }
1510                spin_unlock(&ctx->flc_lock);
1511        }
1512
1513        if (has_lease)
1514                *time = current_fs_time(inode->i_sb);
1515        else
1516                *time = inode->i_mtime;
1517}
1518
1519EXPORT_SYMBOL(lease_get_mtime);
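
/*
 * Illustrative use (a hedged sketch, not part of this file): a file server
 * encoding attributes can ask for the lease-adjusted mtime so that clients
 * revalidate their caches while a write lease is outstanding:
 *
 *	struct timespec mtime;
 *
 *	lease_get_mtime(inode, &mtime);	// current time if write-leased
 *	// encode mtime instead of reading inode->i_mtime directly
 */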
1520
1521/**
1522 *      fcntl_getlease - Enquire what lease is currently active
1523 *      @filp: the file
1524 *
1525 *      The value returned by this function will be one of
1526 *      (if no lease break is pending):
1527 *
1528 *      %F_RDLCK to indicate a shared lease is held.
1529 *
1530 *      %F_WRLCK to indicate an exclusive lease is held.
1531 *
1532 *      %F_UNLCK to indicate no lease is held.
1533 *
1534 *      (if a lease break is pending):
1535 *
1536 *      %F_RDLCK to indicate an exclusive lease needs to be
1537 *              changed to a shared lease (or removed).
1538 *
1539 *      %F_UNLCK to indicate the lease needs to be removed.
1540 *
1541 *      XXX: sfr & willy disagree over whether F_INPROGRESS
1542 *      should be returned to userspace.
1543 */
1544int fcntl_getlease(struct file *filp)
1545{
1546        struct file_lock *fl;
1547        struct inode *inode = file_inode(filp);
1548        struct file_lock_context *ctx = inode->i_flctx;
1549        int type = F_UNLCK;
1550        LIST_HEAD(dispose);
1551
1552        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1553                spin_lock(&ctx->flc_lock);
1554                time_out_leases(file_inode(filp), &dispose);
1555                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1556                        if (fl->fl_file != filp)
1557                                continue;
1558                        type = target_leasetype(fl);
1559                        break;
1560                }
1561                spin_unlock(&ctx->flc_lock);
1562                locks_dispose_list(&dispose);
1563        }
1564        return type;
1565}
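
/*
 * Userspace view (hedged example, not part of this file): the value computed
 * above is what fcntl(2) returns for F_GETLEASE:
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *
 *	if (type == F_WRLCK)
 *		;	// we hold an exclusive lease (no break pending)
 *	else if (type == F_RDLCK)
 *		;	// shared lease held, or exclusive lease being downgraded
 */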
1566
1567/**
1568 * check_conflicting_open - see if the given dentry points to a file that has
1569 *                          an existing open that would conflict with the
1570 *                          desired lease.
1571 * @dentry:     dentry to check
1572 * @arg:        type of lease that we're trying to acquire
1573 *
1574 * Check to see if there's an existing open fd on this file that would
1575 * conflict with the lease we're trying to set.
1576 */
1577static int
1578check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1579{
1580        int ret = 0;
1581        struct inode *inode = dentry->d_inode;
1582
1583        if (flags & FL_LAYOUT)
1584                return 0;
1585
1586        if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1587                return -EAGAIN;
1588
1589        if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1590            (atomic_read(&inode->i_count) > 1)))
1591                ret = -EAGAIN;
1592
1593        return ret;
1594}
1595
1596static int
1597generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1598{
1599        struct file_lock *fl, *my_fl = NULL, *lease;
1600        struct dentry *dentry = filp->f_path.dentry;
1601        struct inode *inode = dentry->d_inode;
1602        struct file_lock_context *ctx;
1603        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1604        int error;
1605        LIST_HEAD(dispose);
1606
1607        lease = *flp;
1608        trace_generic_add_lease(inode, lease);
1609
1610        /* Note that arg is never F_UNLCK here */
1611        ctx = locks_get_lock_context(inode, arg);
1612        if (!ctx)
1613                return -ENOMEM;
1614
1615        /*
1616         * In the delegation case we need mutual exclusion with
1617         * a number of operations that take the i_mutex.  We trylock
1618         * because delegations are an optional optimization; if there's
1619         * some chance of a conflict we'd rather not bother, as that may
1620         * be a sign this just isn't a good file to hand out a
1621         * delegation on.
1622         */
1623        if (is_deleg && !mutex_trylock(&inode->i_mutex))
1624                return -EAGAIN;
1625
1626        if (is_deleg && arg == F_WRLCK) {
1627                /* Write delegations are not currently supported: */
1628                mutex_unlock(&inode->i_mutex);
1629                WARN_ON_ONCE(1);
1630                return -EINVAL;
1631        }
1632
1633        spin_lock(&ctx->flc_lock);
1634        time_out_leases(inode, &dispose);
1635        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1636        if (error)
1637                goto out;
1638
1639        /*
1640         * At this point, we know that if there is an exclusive
1641         * lease on this file, then we hold it on this filp
1642         * (otherwise our open of this file would have blocked).
1643         * And if we are trying to acquire an exclusive lease,
1644         * then the file is not open by anyone (including us)
1645         * except for this filp.
1646         */
1647        error = -EAGAIN;
1648        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1649                if (fl->fl_file == filp &&
1650                    fl->fl_owner == lease->fl_owner) {
1651                        my_fl = fl;
1652                        continue;
1653                }
1654
1655                /*
1656                 * No exclusive leases if someone else has a lease on
1657                 * this file:
1658                 */
1659                if (arg == F_WRLCK)
1660                        goto out;
1661                /*
1662                 * Modifying our existing lease is OK, but we cannot get a
1663                 * new lease if someone else is opening for write:
1664                 */
1665                if (fl->fl_flags & FL_UNLOCK_PENDING)
1666                        goto out;
1667        }
1668
1669        if (my_fl != NULL) {
1670                lease = my_fl;
1671                error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1672                if (error)
1673                        goto out;
1674                goto out_setup;
1675        }
1676
1677        error = -EINVAL;
1678        if (!leases_enable)
1679                goto out;
1680
1681        locks_insert_lock_ctx(lease, &ctx->flc_lease);
1682        /*
1683         * The check in break_lease() is lockless. It's possible for another
1684         * open to race in after we did the earlier check for a conflicting
1685         * open but before the lease was inserted. Check again for a
1686         * conflicting open and cancel the lease if there is one.
1687         *
1688         * We also add a barrier here to ensure that the insertion of the lock
1689         * precedes these checks.
1690         */
1691        smp_mb();
1692        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1693        if (error) {
1694                locks_unlink_lock_ctx(lease);
1695                goto out;
1696        }
1697
1698out_setup:
1699        if (lease->fl_lmops->lm_setup)
1700                lease->fl_lmops->lm_setup(lease, priv);
1701out:
1702        spin_unlock(&ctx->flc_lock);
1703        locks_dispose_list(&dispose);
1704        if (is_deleg)
1705                mutex_unlock(&inode->i_mutex);
1706        if (!error && !my_fl)
1707                *flp = NULL;
1708        return error;
1709}
1710
1711static int generic_delete_lease(struct file *filp, void *owner)
1712{
1713        int error = -EAGAIN;
1714        struct file_lock *fl, *victim = NULL;
1715        struct dentry *dentry = filp->f_path.dentry;
1716        struct inode *inode = dentry->d_inode;
1717        struct file_lock_context *ctx = inode->i_flctx;
1718        LIST_HEAD(dispose);
1719
1720        if (!ctx) {
1721                trace_generic_delete_lease(inode, NULL);
1722                return error;
1723        }
1724
1725        spin_lock(&ctx->flc_lock);
1726        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1727                if (fl->fl_file == filp &&
1728                    fl->fl_owner == owner) {
1729                        victim = fl;
1730                        break;
1731                }
1732        }
1733        trace_generic_delete_lease(inode, victim);
1734        if (victim)
1735                error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1736        spin_unlock(&ctx->flc_lock);
1737        locks_dispose_list(&dispose);
1738        return error;
1739}
1740
1741/**
1742 *      generic_setlease        -       sets a lease on an open file
1743 *      @filp:  file pointer
1744 *      @arg:   type of lease to obtain
1745 *      @flp:   input - file_lock to use, output - file_lock inserted
1746 *      @priv:  private data for lm_setup (may be NULL if lm_setup
1747 *              doesn't require it)
1748 *
1749 *      The (input) flp->fl_lmops->lm_break function is required
1750 *      by break_lease().
1751 */
1752int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1753                        void **priv)
1754{
1755        struct dentry *dentry = filp->f_path.dentry;
1756        struct inode *inode = dentry->d_inode;
1757        int error;
1758
1759        if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1760                return -EACCES;
1761        if (!S_ISREG(inode->i_mode))
1762                return -EINVAL;
1763        error = security_file_lock(filp, arg);
1764        if (error)
1765                return error;
1766
1767        switch (arg) {
1768        case F_UNLCK:
1769                return generic_delete_lease(filp, *priv);
1770        case F_RDLCK:
1771        case F_WRLCK:
1772                if (!(*flp)->fl_lmops->lm_break) {
1773                        WARN_ON_ONCE(1);
1774                        return -ENOLCK;
1775                }
1776
1777                return generic_add_lease(filp, arg, flp, priv);
1778        default:
1779                return -EINVAL;
1780        }
1781}
1782EXPORT_SYMBOL(generic_setlease);
1783
1784/**
1785 * vfs_setlease        -       sets a lease on an open file
1786 * @filp:       file pointer
1787 * @arg:        type of lease to obtain
1788 * @lease:      file_lock to use when adding a lease
1789 * @priv:       private info for lm_setup when adding a lease (may be
1790 *              NULL if lm_setup doesn't require it)
1791 *
1792 * Call this to establish a lease on the file. The "lease" argument is not
1793 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1794 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
1795 * if not, this function will return -ENOLCK (and generate a scary-looking
1796 * stack trace).
1797 *
1798 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1799 * may be NULL if the lm_setup operation doesn't require it.
1800 */
1801int
1802vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1803{
1804        if (filp->f_op->setlease)
1805                return filp->f_op->setlease(filp, arg, lease, priv);
1806        else
1807                return generic_setlease(filp, arg, lease, priv);
1808}
1809EXPORT_SYMBOL_GPL(vfs_setlease);
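
/*
 * Illustrative in-kernel caller (a hedged sketch, not part of this file):
 * a server handing out a read delegation supplies its own lock_manager
 * operations (lm_break is mandatory, see above) and calls roughly:
 *
 *	struct file_lock *fl;		// initialized as an F_RDLCK lease,
 *					// fl->fl_lmops pointing at the caller's ops
 *	void *priv = my_priv;		// hypothetical argument for lm_setup
 *	int error = vfs_setlease(filp, F_RDLCK, &fl, &priv);
 *
 *	if (fl)				// non-NULL means the lock was not consumed
 *		locks_free_lock(fl);
 */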
1810
1811static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1812{
1813        struct file_lock *fl;
1814        struct fasync_struct *new;
1815        int error;
1816
1817        fl = lease_alloc(filp, arg);
1818        if (IS_ERR(fl))
1819                return PTR_ERR(fl);
1820
1821        new = fasync_alloc();
1822        if (!new) {
1823                locks_free_lock(fl);
1824                return -ENOMEM;
1825        }
1826        new->fa_fd = fd;
1827
1828        error = vfs_setlease(filp, arg, &fl, (void **)&new);
1829        if (fl)
1830                locks_free_lock(fl);
1831        if (new)
1832                fasync_free(new);
1833        return error;
1834}
1835
1836/**
1837 *      fcntl_setlease  -       sets a lease on an open file
1838 *      @fd: open file descriptor
1839 *      @filp: file pointer
1840 *      @arg: type of lease to obtain
1841 *
1842 *      Call this fcntl to establish a lease on the file.
1843 *      Note that you also need to call %F_SETSIG to
1844 *      receive a signal when the lease is broken.
1845 */
1846int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1847{
1848        if (arg == F_UNLCK)
1849                return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1850        return do_fcntl_add_lease(fd, filp, arg);
1851}
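
/*
 * Userspace view (hedged example, not part of this file): F_SETLEASE is
 * typically paired with F_SETSIG so the holder is told about lease breaks:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// 0 would mean plain SIGIO
 *	if (fcntl(fd, F_SETLEASE, F_WRLCK) == -1)
 *		perror("F_SETLEASE");
 *	// on the signal, downgrade or drop the lease within lease_break_time
 */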
1852
1853/**
1854 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1855 * @filp: The file to apply the lock to
1856 * @fl: The lock to be applied
1857 *
1858 * Add a FLOCK style lock to a file.
1859 */
1860int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1861{
1862        int error;
1863        might_sleep();
1864        for (;;) {
1865                error = flock_lock_file(filp, fl);
1866                if (error != FILE_LOCK_DEFERRED)
1867                        break;
1868                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1869                if (!error)
1870                        continue;
1871
1872                locks_delete_block(fl);
1873                break;
1874        }
1875        return error;
1876}
1877
1878EXPORT_SYMBOL(flock_lock_file_wait);
1879
1880/**
1881 *      sys_flock: - flock() system call.
1882 *      @fd: the file descriptor to lock.
1883 *      @cmd: the type of lock to apply.
1884 *
1885 *      Apply a %FL_FLOCK style lock to an open file descriptor.
1886 *      The @cmd can be one of
1887 *
1888 *      %LOCK_SH -- a shared lock.
1889 *
1890 *      %LOCK_EX -- an exclusive lock.
1891 *
1892 *      %LOCK_UN -- remove an existing lock.
1893 *
1894 *      %LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1895 *
1896 *      %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1897 *      processes read and write access respectively.
1898 */
1899SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1900{
1901        struct fd f = fdget(fd);
1902        struct file_lock *lock;
1903        int can_sleep, unlock;
1904        int error;
1905
1906        error = -EBADF;
1907        if (!f.file)
1908                goto out;
1909
1910        can_sleep = !(cmd & LOCK_NB);
1911        cmd &= ~LOCK_NB;
1912        unlock = (cmd == LOCK_UN);
1913
1914        if (!unlock && !(cmd & LOCK_MAND) &&
1915            !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1916                goto out_putf;
1917
1918        lock = flock_make_lock(f.file, cmd);
1919        if (IS_ERR(lock)) {
1920                error = PTR_ERR(lock);
1921                goto out_putf;
1922        }
1923
1924        if (can_sleep)
1925                lock->fl_flags |= FL_SLEEP;
1926
1927        error = security_file_lock(f.file, lock->fl_type);
1928        if (error)
1929                goto out_free;
1930
1931        if (f.file->f_op->flock)
1932                error = f.file->f_op->flock(f.file,
1933                                          (can_sleep) ? F_SETLKW : F_SETLK,
1934                                          lock);
1935        else
1936                error = flock_lock_file_wait(f.file, lock);
1937
1938 out_free:
1939        locks_free_lock(lock);
1940
1941 out_putf:
1942        fdput(f);
1943 out:
1944        return error;
1945}
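
/*
 * Userspace view (hedged example, not part of this file): the syscall above
 * backs flock(2); a non-blocking exclusive lock attempt looks like:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		;			// a conflicting flock lock is held
 *	...
 *	flock(fd, LOCK_UN);		// or simply close the last descriptor
 */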
1946
1947/**
1948 * vfs_test_lock - test file byte range lock
1949 * @filp: The file to test lock for
1950 * @fl: The lock to test; also used to hold result
1951 *
1952 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1953 * setting fl->fl_type to something other than F_UNLCK.
1954 */
1955int vfs_test_lock(struct file *filp, struct file_lock *fl)
1956{
1957        if (filp->f_op->lock)
1958                return filp->f_op->lock(filp, F_GETLK, fl);
1959        posix_test_lock(filp, fl);
1960        return 0;
1961}
1962EXPORT_SYMBOL_GPL(vfs_test_lock);
1963
1964static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1965{
1966        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1967#if BITS_PER_LONG == 32
1968        /*
1969         * Make sure we can represent the posix lock via
1970         * legacy 32bit flock.
1971         */
1972        if (fl->fl_start > OFFT_OFFSET_MAX)
1973                return -EOVERFLOW;
1974        if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1975                return -EOVERFLOW;
1976#endif
1977        flock->l_start = fl->fl_start;
1978        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1979                fl->fl_end - fl->fl_start + 1;
1980        flock->l_whence = 0;
1981        flock->l_type = fl->fl_type;
1982        return 0;
1983}
1984
1985#if BITS_PER_LONG == 32
1986static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1987{
1988        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
1989        flock->l_start = fl->fl_start;
1990        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1991                fl->fl_end - fl->fl_start + 1;
1992        flock->l_whence = 0;
1993        flock->l_type = fl->fl_type;
1994}
1995#endif
1996
1997/* Report the first existing lock that would conflict with l.
1998 * This implements the F_GETLK command of fcntl().
1999 */
2000int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
2001{
2002        struct file_lock file_lock;
2003        struct flock flock;
2004        int error;
2005
2006        error = -EFAULT;
2007        if (copy_from_user(&flock, l, sizeof(flock)))
2008                goto out;
2009        error = -EINVAL;
2010        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2011                goto out;
2012
2013        error = flock_to_posix_lock(filp, &file_lock, &flock);
2014        if (error)
2015                goto out;
2016
2017        if (cmd == F_OFD_GETLK) {
2018                error = -EINVAL;
2019                if (flock.l_pid != 0)
2020                        goto out;
2021
2022                cmd = F_GETLK;
2023                file_lock.fl_flags |= FL_OFDLCK;
2024                file_lock.fl_owner = filp;
2025        }
2026
2027        error = vfs_test_lock(filp, &file_lock);
2028        if (error)
2029                goto out;
2030 
2031        flock.l_type = file_lock.fl_type;
2032        if (file_lock.fl_type != F_UNLCK) {
2033                error = posix_lock_to_flock(&flock, &file_lock);
2034                if (error)
2035                        goto rel_priv;
2036        }
2037        error = -EFAULT;
2038        if (!copy_to_user(l, &flock, sizeof(flock)))
2039                error = 0;
2040rel_priv:
2041        locks_release_private(&file_lock);
2042out:
2043        return error;
2044}
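
/*
 * Userspace view (hedged example, not part of this file): F_GETLK fills in
 * the first conflicting lock, or sets l_type to F_UNLCK if the range is free:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0,	// l_len == 0 means "to EOF"
 *	};
 *
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		printf("conflict held by pid %d\n", fl.l_pid);
 */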
2045
2046/**
2047 * vfs_lock_file - file byte range lock
2048 * @filp: The file to apply the lock to
2049 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2050 * @fl: The lock to be applied
2051 * @conf: Place to return a copy of the conflicting lock, if found.
2052 *
2053 * A caller that doesn't care about the conflicting lock may pass NULL
2054 * as the final argument.
2055 *
2056 * If the filesystem defines a private ->lock() method, then @conf will
2057 * be left unchanged; so a caller that cares should initialize it to
2058 * some acceptable default.
2059 *
2060 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2061 * locks, the ->lock() interface may return asynchronously, before the lock has
2062 * been granted or denied by the underlying filesystem, if (and only if)
2063 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2064 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2065 * the request is for a blocking lock. When ->lock() does return asynchronously,
2066 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2067 * request completes.
2068 * If the request is for a non-blocking lock the file system should return
2069 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2070 * with the result. If the request times out, the callback routine will return
2071 * a nonzero return code and the file system should release the lock. The file
2072 * system is also responsible for keeping a corresponding posix lock when it
2073 * grants a lock, so the VFS can find out which locks are locally held and do
2074 * the correct lock cleanup when required.
2075 * The underlying filesystem must not drop the kernel lock or call
2076 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2077 * return code.
2078 */
2079int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2080{
2081        if (filp->f_op->lock)
2082                return filp->f_op->lock(filp, cmd, fl);
2083        else
2084                return posix_lock_file(filp, fl, conf);
2085}
2086EXPORT_SYMBOL_GPL(vfs_lock_file);
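
/*
 * Illustrative filesystem side (a hedged sketch, not part of this file): an
 * asynchronous ->lock() instance of the kind described above queues the
 * request and completes it later through fl->fl_lmops->lm_grant():
 *
 *	static int my_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (!my_fs_is_remote(filp))		// hypothetical helper
 *			return posix_lock_file(filp, fl, NULL);
 *		my_queue_remote_request(filp, cmd, fl);	// hypothetical; calls
 *		return FILE_LOCK_DEFERRED;		// lm_grant() on completion
 *	}
 */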
2087
2088static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2089                             struct file_lock *fl)
2090{
2091        int error;
2092
2093        error = security_file_lock(filp, fl->fl_type);
2094        if (error)
2095                return error;
2096
2097        for (;;) {
2098                error = vfs_lock_file(filp, cmd, fl, NULL);
2099                if (error != FILE_LOCK_DEFERRED)
2100                        break;
2101                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2102                if (!error)
2103                        continue;
2104
2105                locks_delete_block(fl);
2106                break;
2107        }
2108
2109        return error;
2110}
2111
2112/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2113static int
2114check_fmode_for_setlk(struct file_lock *fl)
2115{
2116        switch (fl->fl_type) {
2117        case F_RDLCK:
2118                if (!(fl->fl_file->f_mode & FMODE_READ))
2119                        return -EBADF;
2120                break;
2121        case F_WRLCK:
2122                if (!(fl->fl_file->f_mode & FMODE_WRITE))
2123                        return -EBADF;
2124        }
2125        return 0;
2126}
2127
2128/* Apply the lock described by l to an open file descriptor.
2129 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2130 */
2131int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2132                struct flock __user *l)
2133{
2134        struct file_lock *file_lock = locks_alloc_lock();
2135        struct flock flock;
2136        struct inode *inode;
2137        struct file *f;
2138        int error;
2139
2140        if (file_lock == NULL)
2141                return -ENOLCK;
2142
2143        /*
2144         * This might block, so we do it before checking the inode.
2145         */
2146        error = -EFAULT;
2147        if (copy_from_user(&flock, l, sizeof(flock)))
2148                goto out;
2149
2150        inode = file_inode(filp);
2151
2152        /* Don't allow mandatory locks on files that may be memory mapped
2153         * and shared.
2154         */
2155        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2156                error = -EAGAIN;
2157                goto out;
2158        }
2159
2160again:
2161        error = flock_to_posix_lock(filp, file_lock, &flock);
2162        if (error)
2163                goto out;
2164
2165        error = check_fmode_for_setlk(file_lock);
2166        if (error)
2167                goto out;
2168
2169        /*
2170         * If the cmd is requesting open file description (OFD) locks, then
2171         * set the FL_OFDLCK flag and override the owner.
2172         */
2173        switch (cmd) {
2174        case F_OFD_SETLK:
2175                error = -EINVAL;
2176                if (flock.l_pid != 0)
2177                        goto out;
2178
2179                cmd = F_SETLK;
2180                file_lock->fl_flags |= FL_OFDLCK;
2181                file_lock->fl_owner = filp;
2182                break;
2183        case F_OFD_SETLKW:
2184                error = -EINVAL;
2185                if (flock.l_pid != 0)
2186                        goto out;
2187
2188                cmd = F_SETLKW;
2189                file_lock->fl_flags |= FL_OFDLCK;
2190                file_lock->fl_owner = filp;
2191                /* Fallthrough */
2192        case F_SETLKW:
2193                file_lock->fl_flags |= FL_SLEEP;
2194        }
2195
2196        error = do_lock_file_wait(filp, cmd, file_lock);
2197
2198        /*
2199         * Attempt to detect a close/fcntl race and recover by
2200         * releasing the lock that was just acquired.
2201         */
2202        /*
2203         * We need that spin_lock here - it prevents reordering between the
2204         * update of i_flctx->flc_posix and the check for it done in close().
2205         * rcu_read_lock() wouldn't do.
2206         */
2207        spin_lock(&current->files->file_lock);
2208        f = fcheck(fd);
2209        spin_unlock(&current->files->file_lock);
2210        if (!error && f != filp && flock.l_type != F_UNLCK) {
2211                flock.l_type = F_UNLCK;
2212                goto again;
2213        }
2214
2215out:
2216        locks_free_lock(file_lock);
2217        return error;
2218}
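
/*
 * Userspace view (hedged example, not part of this file): F_SETLK fails on
 * conflict while F_SETLKW blocks; F_OFD_SETLK behaves like F_SETLK but
 * requires l_pid == 0 and is owned by the open file description:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0,
 *	};
 *
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		;	// conflicting lock held; retry later or use F_SETLKW
 */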
2219
2220#if BITS_PER_LONG == 32
2221/* Report the first existing lock that would conflict with l.
2222 * This implements the F_GETLK command of fcntl().
2223 */
2224int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
2225{
2226        struct file_lock file_lock;
2227        struct flock64 flock;
2228        int error;
2229
2230        error = -EFAULT;
2231        if (copy_from_user(&flock, l, sizeof(flock)))
2232                goto out;
2233        error = -EINVAL;
2234        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2235                goto out;
2236
2237        error = flock64_to_posix_lock(filp, &file_lock, &flock);
2238        if (error)
2239                goto out;
2240
2241        if (cmd == F_OFD_GETLK) {
2242                error = -EINVAL;
2243                if (flock.l_pid != 0)
2244                        goto out;
2245
2246                cmd = F_GETLK64;
2247                file_lock.fl_flags |= FL_OFDLCK;
2248                file_lock.fl_owner = filp;
2249        }
2250
2251        error = vfs_test_lock(filp, &file_lock);
2252        if (error)
2253                goto out;
2254
2255        flock.l_type = file_lock.fl_type;
2256        if (file_lock.fl_type != F_UNLCK)
2257                posix_lock_to_flock64(&flock, &file_lock);
2258
2259        error = -EFAULT;
2260        if (!copy_to_user(l, &flock, sizeof(flock)))
2261                error = 0;
2262
2263        locks_release_private(&file_lock);
2264out:
2265        return error;
2266}
2267
2268/* Apply the lock described by l to an open file descriptor.
2269 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2270 */
2271int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2272                struct flock64 __user *l)
2273{
2274        struct file_lock *file_lock = locks_alloc_lock();
2275        struct flock64 flock;
2276        struct inode *inode;
2277        struct file *f;
2278        int error;
2279
2280        if (file_lock == NULL)
2281                return -ENOLCK;
2282
2283        /*
2284         * This might block, so we do it before checking the inode.
2285         */
2286        error = -EFAULT;
2287        if (copy_from_user(&flock, l, sizeof(flock)))
2288                goto out;
2289
2290        inode = file_inode(filp);
2291
2292        /* Don't allow mandatory locks on files that may be memory mapped
2293         * and shared.
2294         */
2295        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2296                error = -EAGAIN;
2297                goto out;
2298        }
2299
2300again:
2301        error = flock64_to_posix_lock(filp, file_lock, &flock);
2302        if (error)
2303                goto out;
2304
2305        error = check_fmode_for_setlk(file_lock);
2306        if (error)
2307                goto out;
2308
2309        /*
2310         * If the cmd is requesting open file description (OFD) locks, then
2311         * set the FL_OFDLCK flag and override the owner.
2312         */
2313        switch (cmd) {
2314        case F_OFD_SETLK:
2315                error = -EINVAL;
2316                if (flock.l_pid != 0)
2317                        goto out;
2318
2319                cmd = F_SETLK64;
2320                file_lock->fl_flags |= FL_OFDLCK;
2321                file_lock->fl_owner = filp;
2322                break;
2323        case F_OFD_SETLKW:
2324                error = -EINVAL;
2325                if (flock.l_pid != 0)
2326                        goto out;
2327
2328                cmd = F_SETLKW64;
2329                file_lock->fl_flags |= FL_OFDLCK;
2330                file_lock->fl_owner = filp;
2331                /* Fallthrough */
2332        case F_SETLKW64:
2333                file_lock->fl_flags |= FL_SLEEP;
2334        }
2335
2336        error = do_lock_file_wait(filp, cmd, file_lock);
2337
2338        /*
2339         * Attempt to detect a close/fcntl race and recover by
2340         * releasing the lock that was just acquired.
2341         */
2342        spin_lock(&current->files->file_lock);
2343        f = fcheck(fd);
2344        spin_unlock(&current->files->file_lock);
2345        if (!error && f != filp && flock.l_type != F_UNLCK) {
2346                flock.l_type = F_UNLCK;
2347                goto again;
2348        }
2349
2350out:
2351        locks_free_lock(file_lock);
2352        return error;
2353}
2354#endif /* BITS_PER_LONG == 32 */
2355
2356/*
2357 * This function is called when the file is being removed
2358 * from the task's fd array.  POSIX locks belonging to this task
2359 * are deleted at this time.
2360 */
2361void locks_remove_posix(struct file *filp, fl_owner_t owner)
2362{
2363        struct file_lock lock;
2364        struct file_lock_context *ctx = file_inode(filp)->i_flctx;
2365
2366        /*
2367         * If there are no locks held on this file, we don't need to call
2368         * posix_lock_file().  Another process could be setting a lock on this
2369         * file at the same time, but we wouldn't remove that lock anyway.
2370         */
2371        if (!ctx || list_empty(&ctx->flc_posix))
2372                return;
2373
2374        lock.fl_type = F_UNLCK;
2375        lock.fl_flags = FL_POSIX | FL_CLOSE;
2376        lock.fl_start = 0;
2377        lock.fl_end = OFFSET_MAX;
2378        lock.fl_owner = owner;
2379        lock.fl_pid = current->tgid;
2380        lock.fl_file = filp;
2381        lock.fl_ops = NULL;
2382        lock.fl_lmops = NULL;
2383
2384        vfs_lock_file(filp, F_SETLK, &lock, NULL);
2385
2386        if (lock.fl_ops && lock.fl_ops->fl_release_private)
2387                lock.fl_ops->fl_release_private(&lock);
2388}
2389
2390EXPORT_SYMBOL(locks_remove_posix);
2391
2392/* The i_flctx must be valid when calling into here */
2393static void
2394locks_remove_flock(struct file *filp)
2395{
2396        struct file_lock fl = {
2397                .fl_owner = filp,
2398                .fl_pid = current->tgid,
2399                .fl_file = filp,
2400                .fl_flags = FL_FLOCK,
2401                .fl_type = F_UNLCK,
2402                .fl_end = OFFSET_MAX,
2403        };
2404        struct file_lock_context *flctx = file_inode(filp)->i_flctx;
2405
2406        if (list_empty(&flctx->flc_flock))
2407                return;
2408
2409        if (filp->f_op->flock)
2410                filp->f_op->flock(filp, F_SETLKW, &fl);
2411        else
2412                flock_lock_file(filp, &fl);
2413
2414        if (fl.fl_ops && fl.fl_ops->fl_release_private)
2415                fl.fl_ops->fl_release_private(&fl);
2416}
2417
2418/* The i_flctx must be valid when calling into here */
2419static void
2420locks_remove_lease(struct file *filp)
2421{
2422        struct inode *inode = file_inode(filp);
2423        struct file_lock_context *ctx = inode->i_flctx;
2424        struct file_lock *fl, *tmp;
2425        LIST_HEAD(dispose);
2426
2427        if (list_empty(&ctx->flc_lease))
2428                return;
2429
2430        spin_lock(&ctx->flc_lock);
2431        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2432                if (filp == fl->fl_file)
2433                        lease_modify(fl, F_UNLCK, &dispose);
2434        spin_unlock(&ctx->flc_lock);
2435        locks_dispose_list(&dispose);
2436}
2437
2438/*
2439 * This function is called on the last close of an open file.
2440 */
2441void locks_remove_file(struct file *filp)
2442{
2443        if (!file_inode(filp)->i_flctx)
2444                return;
2445
2446        /* remove any OFD locks */
2447        locks_remove_posix(filp, filp);
2448
2449        /* remove flock locks */
2450        locks_remove_flock(filp);
2451
2452        /* remove any leases */
2453        locks_remove_lease(filp);
2454}
2455
2456/**
2457 *      posix_unblock_lock - stop waiting for a file lock
2458 *      @waiter: the lock which was waiting
2459 *
2460 *      lockd needs to block waiting for locks.
2461 */
2462int
2463posix_unblock_lock(struct file_lock *waiter)
2464{
2465        int status = 0;
2466
2467        spin_lock(&blocked_lock_lock);
2468        if (waiter->fl_next)
2469                __locks_delete_block(waiter);
2470        else
2471                status = -ENOENT;
2472        spin_unlock(&blocked_lock_lock);
2473        return status;
2474}
2475EXPORT_SYMBOL(posix_unblock_lock);
2476
2477/**
2478 * vfs_cancel_lock - file byte range unblock lock
2479 * @filp: The file to apply the unblock to
2480 * @fl: The lock to be unblocked
2481 *
2482 * Used by lock managers to cancel blocked requests
2483 */
2484int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2485{
2486        if (filp->f_op->lock)
2487                return filp->f_op->lock(filp, F_CANCELLK, fl);
2488        return 0;
2489}
2490
2491EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2492
2493#ifdef CONFIG_PROC_FS
2494#include <linux/proc_fs.h>
2495#include <linux/seq_file.h>
2496
2497struct locks_iterator {
2498        int     li_cpu;
2499        loff_t  li_pos;
2500};
2501
2502static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2503                            loff_t id, char *pfx)
2504{
2505        struct inode *inode = NULL;
2506        unsigned int fl_pid;
2507
2508        if (fl->fl_nspid)
2509                fl_pid = pid_vnr(fl->fl_nspid);
2510        else
2511                fl_pid = fl->fl_pid;
2512
2513        if (fl->fl_file != NULL)
2514                inode = file_inode(fl->fl_file);
2515
2516        seq_printf(f, "%lld:%s ", id, pfx);
2517        if (IS_POSIX(fl)) {
2518                if (fl->fl_flags & FL_ACCESS)
2519                        seq_puts(f, "ACCESS");
2520                else if (IS_OFDLCK(fl))
2521                        seq_puts(f, "OFDLCK");
2522                else
2523                        seq_puts(f, "POSIX ");
2524
2525                seq_printf(f, " %s ",
2526                             (inode == NULL) ? "*NOINODE*" :
2527                             mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2528        } else if (IS_FLOCK(fl)) {
2529                if (fl->fl_type & LOCK_MAND) {
2530                        seq_puts(f, "FLOCK  MSNFS     ");
2531                } else {
2532                        seq_puts(f, "FLOCK  ADVISORY  ");
2533                }
2534        } else if (IS_LEASE(fl)) {
2535                if (fl->fl_flags & FL_DELEG)
2536                        seq_puts(f, "DELEG  ");
2537                else
2538                        seq_puts(f, "LEASE  ");
2539
2540                if (lease_breaking(fl))
2541                        seq_puts(f, "BREAKING  ");
2542                else if (fl->fl_file)
2543                        seq_puts(f, "ACTIVE    ");
2544                else
2545                        seq_puts(f, "BREAKER   ");
2546        } else {
2547                seq_puts(f, "UNKNOWN UNKNOWN  ");
2548        }
2549        if (fl->fl_type & LOCK_MAND) {
2550                seq_printf(f, "%s ",
2551                               (fl->fl_type & LOCK_READ)
2552                               ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2553                               : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2554        } else {
2555                seq_printf(f, "%s ",
2556                               (lease_breaking(fl))
2557                               ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2558                               : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2559        }
2560        if (inode) {
2561                /* userspace relies on this representation of dev_t */
2562                seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2563                                MAJOR(inode->i_sb->s_dev),
2564                                MINOR(inode->i_sb->s_dev), inode->i_ino);
2565        } else {
2566                seq_printf(f, "%d <none>:0 ", fl_pid);
2567        }
2568        if (IS_POSIX(fl)) {
2569                if (fl->fl_end == OFFSET_MAX)
2570                        seq_printf(f, "%Ld EOF\n", fl->fl_start);
2571                else
2572                        seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2573        } else {
2574                seq_puts(f, "0 EOF\n");
2575        }
2576}
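
/*
 * For reference (hedged reconstruction from the seq_printf() calls above,
 * not literal output): a /proc/locks line produced here looks roughly like
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:1048578 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1234 08:01:1048579 0 EOF
 *	3: LEASE  ACTIVE    READ  1234 08:01:1048580 0 EOF
 *
 * i.e. id, lock class, mandatory/advisory (or lease state), access, pid,
 * major:minor:inode, and the byte range (start end, or "EOF").
 */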
2577
2578static int locks_show(struct seq_file *f, void *v)
2579{
2580        struct locks_iterator *iter = f->private;
2581        struct file_lock *fl, *bfl;
2582
2583        fl = hlist_entry(v, struct file_lock, fl_link);
2584
2585        lock_get_status(f, fl, iter->li_pos, "");
2586
2587        list_for_each_entry(bfl, &fl->fl_block, fl_block)
2588                lock_get_status(f, bfl, iter->li_pos, " ->");
2589
2590        return 0;
2591}
2592
2593static void __show_fd_locks(struct seq_file *f,
2594                        struct list_head *head, int *id,
2595                        struct file *filp, struct files_struct *files)
2596{
2597        struct file_lock *fl;
2598
2599        list_for_each_entry(fl, head, fl_list) {
2600
2601                if (filp != fl->fl_file)
2602                        continue;
2603                if (fl->fl_owner != files &&
2604                    fl->fl_owner != filp)
2605                        continue;
2606
2607                (*id)++;
2608                seq_puts(f, "lock:\t");
2609                lock_get_status(f, fl, *id, "");
2610        }
2611}
2612
2613void show_fd_locks(struct seq_file *f,
2614                  struct file *filp, struct files_struct *files)
2615{
2616        struct inode *inode = file_inode(filp);
2617        struct file_lock_context *ctx;
2618        int id = 0;
2619
2620        ctx = inode->i_flctx;
2621        if (!ctx)
2622                return;
2623
2624        spin_lock(&ctx->flc_lock);
2625        __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2626        __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2627        __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2628        spin_unlock(&ctx->flc_lock);
2629}
2630
2631static void *locks_start(struct seq_file *f, loff_t *pos)
2632        __acquires(&blocked_lock_lock)
2633{
2634        struct locks_iterator *iter = f->private;
2635
2636        iter->li_pos = *pos + 1;
2637        lg_global_lock(&file_lock_lglock);
2638        spin_lock(&blocked_lock_lock);
2639        return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
2640}
2641
2642static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2643{
2644        struct locks_iterator *iter = f->private;
2645
2646        ++iter->li_pos;
2647        return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
2648}
2649
2650static void locks_stop(struct seq_file *f, void *v)
2651        __releases(&blocked_lock_lock)
2652{
2653        spin_unlock(&blocked_lock_lock);
2654        lg_global_unlock(&file_lock_lglock);
2655}
2656
2657static const struct seq_operations locks_seq_operations = {
2658        .start  = locks_start,
2659        .next   = locks_next,
2660        .stop   = locks_stop,
2661        .show   = locks_show,
2662};
2663
2664static int locks_open(struct inode *inode, struct file *filp)
2665{
2666        return seq_open_private(filp, &locks_seq_operations,
2667                                        sizeof(struct locks_iterator));
2668}
2669
2670static const struct file_operations proc_locks_operations = {
2671        .open           = locks_open,
2672        .read           = seq_read,
2673        .llseek         = seq_lseek,
2674        .release        = seq_release_private,
2675};
2676
2677static int __init proc_locks_init(void)
2678{
2679        proc_create("locks", 0, NULL, &proc_locks_operations);
2680        return 0;
2681}
2682module_init(proc_locks_init);
2683#endif
2684
2685static int __init filelock_init(void)
2686{
2687        int i;
2688
2689        flctx_cache = kmem_cache_create("file_lock_ctx",
2690                        sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2691
2692        filelock_cache = kmem_cache_create("file_lock_cache",
2693                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2694
2695        lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2696
2697        for_each_possible_cpu(i)
2698                INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2699
2700        return 0;
2701}
2702
2703core_initcall(filelock_init);
2704