linux/fs/locks.c
   1/*
   2 *  linux/fs/locks.c
   3 *
   4 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5 *  Doug Evans (dje@spiff.uucp), August 07, 1992
   6 *
   7 *  Deadlock detection added.
   8 *  FIXME: one thing isn't handled yet:
   9 *      - mandatory locks (requires lots of changes elsewhere)
  10 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
  11 *
  12 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
  13 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
  14 *  
  15 *  Converted file_lock_table to a linked list from an array, which eliminates
  16 *  the limits on how many active file locks are open.
  17 *  Chad Page (pageone@netcom.com), November 27, 1994
  18 * 
  19 *  Removed dependency on file descriptors. dup()'ed file descriptors now
  20 *  get the same locks as the original file descriptors, and a close() on
  21 *  any file descriptor removes ALL the locks on the file for the current
  22 *  process. Since locks still depend on the process id, locks are inherited
  23 *  after an exec() but not after a fork(). This agrees with POSIX, and both
  24 *  BSD and SVR4 practice.
  25 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
  26 *
  27 *  Scrapped free list which is redundant now that we allocate locks
  28 *  dynamically with kmalloc()/kfree().
  29 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  30 *
  31 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
  32 *
  33 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
  34 *  fcntl() system call. They have the semantics described above.
  35 *
  36 *  FL_FLOCK locks are created with calls to flock(), through the flock()
  37 *  system call, which is new. Old C libraries implement flock() via fcntl()
  38 *  and will continue to use the old, broken implementation.
  39 *
  40 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
  41 *  with a file pointer (filp). As a result they can be shared by a parent
  42 *  process and its children after a fork(). They are removed when the last
  43 *  file descriptor referring to the file pointer is closed (unless explicitly
  44 *  unlocked). 
  45 *
  46 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
  47 *  upgrading from shared to exclusive (or vice versa). When this happens
  48 *  any processes blocked by the current lock are woken up and allowed to
  49 *  run before the new lock is applied.
  50 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
  51 *
  52 *  Removed some race conditions in flock_lock_file(), marked other possible
  53 *  races. Just grep for FIXME to see them. 
  54 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
  55 *
  56 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
  57 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
  58 *  once we've checked for blocking and deadlocking.
  59 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
  60 *
  61 *  Initial implementation of mandatory locks. SunOS turned out to be
  62 *  a rotten model, so I implemented the "obvious" semantics.
  63 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
  64 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
  65 *
  66 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
  67 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
  68 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
  69 *  Manual, Section 2.
  70 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
  71 *
  72 *  Tidied up block list handling. Added '/proc/locks' interface.
  73 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
  74 *
  75 *  Fixed deadlock condition for pathological code that mixes calls to
  76 *  flock() and fcntl().
  77 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
  78 *
  79 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
  80 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
  81 *  guarantee sensible behaviour in the case where file system modules might
  82 *  be compiled with different options than the kernel itself.
  83 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  84 *
  85 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
  86 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
  87 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  88 *
  89 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
  90 *  locks. Changed process synchronisation to avoid dereferencing locks that
  91 *  have already been freed.
  92 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
  93 *
  94 *  Made the block list a circular list to minimise searching in the list.
  95 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
  96 *
  97 *  Made mandatory locking a mount option. Default is not to allow mandatory
  98 *  locking.
  99 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 100 *
 101 *  Some adaptations for NFS support.
 102 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 103 *
 104 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 105 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 106 *
 107 *  Use slab allocator instead of kmalloc/kfree.
 108 *  Use generic list implementation from <linux/list.h>.
 109 *  Sped up posix_locks_deadlock by only considering blocked locks.
 110 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 111 *
 112 *  Leases and LOCK_MAND
 113 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 114 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 115 */
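/*
 * Illustrative userspace sketch (not part of this file) of the two lock
 * personalities described above: fcntl() with F_SETLK takes a byte-range
 * POSIX lock (FL_POSIX), while flock() takes a whole-file lock tied to the
 * open file description (FL_FLOCK). Assumes a hypothetical descriptor fd on
 * an open, writable file; needs <fcntl.h> and <sys/file.h>.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,		// exclusive byte-range lock
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,		// bytes 0..99
 *	};
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		perror("fcntl(F_SETLK)");	// EAGAIN or EACCES on conflict
 *
 *	if (flock(fd, LOCK_EX) == -1)		// whole-file advisory lock
 *		perror("flock");
 */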
 116
 117#include <linux/capability.h>
 118#include <linux/file.h>
 119#include <linux/fdtable.h>
 120#include <linux/fs.h>
 121#include <linux/init.h>
 122#include <linux/security.h>
 123#include <linux/slab.h>
 124#include <linux/syscalls.h>
 125#include <linux/time.h>
 126#include <linux/rcupdate.h>
 127#include <linux/pid_namespace.h>
 128#include <linux/hashtable.h>
 129#include <linux/percpu.h>
 130#include <linux/lglock.h>
 131
 132#define CREATE_TRACE_POINTS
 133#include <trace/events/filelock.h>
 134
 135#include <asm/uaccess.h>
 136
 137#define IS_POSIX(fl)    (fl->fl_flags & FL_POSIX)
 138#define IS_FLOCK(fl)    (fl->fl_flags & FL_FLOCK)
 139#define IS_LEASE(fl)    (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
 140#define IS_OFDLCK(fl)   (fl->fl_flags & FL_OFDLCK)
 141
 142static bool lease_breaking(struct file_lock *fl)
 143{
 144        return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
 145}
 146
 147static int target_leasetype(struct file_lock *fl)
 148{
 149        if (fl->fl_flags & FL_UNLOCK_PENDING)
 150                return F_UNLCK;
 151        if (fl->fl_flags & FL_DOWNGRADE_PENDING)
 152                return F_RDLCK;
 153        return fl->fl_type;
 154}
 155
 156int leases_enable = 1;
 157int lease_break_time = 45;
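/*
 * Both knobs above are runtime-tunable; they are normally exposed as the
 * fs.leases-enable and fs.lease-break-time sysctls (the latter in seconds),
 * registered outside this file.
 */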
 158
 159/*
 160 * The global file_lock_list is only used for displaying /proc/locks, so we
 161 * keep a list on each CPU, with each list protected by its own spinlock via
 162 * the file_lock_lglock. Note that alterations to the list also require that
 163 * the relevant flc_lock is held.
 164 */
 165DEFINE_STATIC_LGLOCK(file_lock_lglock);
 166static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 167
 168/*
 169 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 170 * It is protected by blocked_lock_lock.
 171 *
 172 * We hash locks by lockowner in order to optimize searching for the lock a
 173 * particular lockowner is waiting on.
 174 *
 175 * FIXME: make this value scale via some heuristic? We generally will want more
 176 * buckets when we have more lockowners holding locks, but that's a little
 177 * difficult to determine without knowing what the workload will look like.
 178 */
 179#define BLOCKED_HASH_BITS       7
 180static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 181
 182/*
 183 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 184 * want to be holding this lock.
 185 *
 186 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 187 * pointer for file_lock structures that are acting as lock requests (in
 188 * contrast to those that are acting as records of acquired locks).
 189 *
 190 * Note that when we acquire this lock in order to change the above fields,
 191 * we often hold the flc_lock as well. In certain cases, when reading the fields
 192 * protected by this lock, we can skip acquiring it iff we already hold the
 193 * flc_lock.
 194 *
 195 * In particular, adding an entry to the fl_block list requires that you hold
 196 * both the flc_lock and the blocked_lock_lock (acquired in that order).
  197 * Deleting an entry from the list, however, only requires the blocked_lock_lock.
 198 */
 199static DEFINE_SPINLOCK(blocked_lock_lock);
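/*
 * Lock-ordering summary for the structures above (a reading aid drawn from
 * the comments in this file): when more than one lock is needed, the
 * per-inode flc_lock is taken first, then the global blocked_lock_lock; the
 * per-CPU file_lock_lglock is likewise only taken while the flc_lock is held.
 */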
 200
 201static struct kmem_cache *flctx_cache __read_mostly;
 202static struct kmem_cache *filelock_cache __read_mostly;
 203
 204static struct file_lock_context *
 205locks_get_lock_context(struct inode *inode, int type)
 206{
 207        struct file_lock_context *ctx;
 208
 209        /* paired with cmpxchg() below */
 210        ctx = smp_load_acquire(&inode->i_flctx);
 211        if (likely(ctx) || type == F_UNLCK)
 212                goto out;
 213
 214        ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
 215        if (!ctx)
 216                goto out;
 217
 218        spin_lock_init(&ctx->flc_lock);
 219        INIT_LIST_HEAD(&ctx->flc_flock);
 220        INIT_LIST_HEAD(&ctx->flc_posix);
 221        INIT_LIST_HEAD(&ctx->flc_lease);
 222
 223        /*
 224         * Assign the pointer if it's not already assigned. If it is, then
 225         * free the context we just allocated.
 226         */
 227        if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
 228                kmem_cache_free(flctx_cache, ctx);
 229                ctx = smp_load_acquire(&inode->i_flctx);
 230        }
 231out:
 232        trace_locks_get_lock_context(inode, type, ctx);
 233        return ctx;
 234}
 235
 236static void
 237locks_dump_ctx_list(struct list_head *list, char *list_type)
 238{
 239        struct file_lock *fl;
 240
 241        list_for_each_entry(fl, list, fl_list) {
 242                pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
 243        }
 244}
 245
 246static void
 247locks_check_ctx_lists(struct inode *inode)
 248{
 249        struct file_lock_context *ctx = inode->i_flctx;
 250
 251        if (unlikely(!list_empty(&ctx->flc_flock) ||
 252                     !list_empty(&ctx->flc_posix) ||
 253                     !list_empty(&ctx->flc_lease))) {
 254                pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
 255                        MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
 256                        inode->i_ino);
 257                locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
 258                locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
 259                locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
 260        }
 261}
 262
 263void
 264locks_free_lock_context(struct inode *inode)
 265{
 266        struct file_lock_context *ctx = inode->i_flctx;
 267
 268        if (unlikely(ctx)) {
 269                locks_check_ctx_lists(inode);
 270                kmem_cache_free(flctx_cache, ctx);
 271        }
 272}
 273
 274static void locks_init_lock_heads(struct file_lock *fl)
 275{
 276        INIT_HLIST_NODE(&fl->fl_link);
 277        INIT_LIST_HEAD(&fl->fl_list);
 278        INIT_LIST_HEAD(&fl->fl_block);
 279        init_waitqueue_head(&fl->fl_wait);
 280}
 281
 282/* Allocate an empty lock structure. */
 283struct file_lock *locks_alloc_lock(void)
 284{
 285        struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 286
 287        if (fl)
 288                locks_init_lock_heads(fl);
 289
 290        return fl;
 291}
 292EXPORT_SYMBOL_GPL(locks_alloc_lock);
 293
 294void locks_release_private(struct file_lock *fl)
 295{
 296        if (fl->fl_ops) {
 297                if (fl->fl_ops->fl_release_private)
 298                        fl->fl_ops->fl_release_private(fl);
 299                fl->fl_ops = NULL;
 300        }
 301
 302        if (fl->fl_lmops) {
 303                if (fl->fl_lmops->lm_put_owner) {
 304                        fl->fl_lmops->lm_put_owner(fl->fl_owner);
 305                        fl->fl_owner = NULL;
 306                }
 307                fl->fl_lmops = NULL;
 308        }
 309}
 310EXPORT_SYMBOL_GPL(locks_release_private);
 311
 312/* Free a lock which is not in use. */
 313void locks_free_lock(struct file_lock *fl)
 314{
 315        BUG_ON(waitqueue_active(&fl->fl_wait));
 316        BUG_ON(!list_empty(&fl->fl_list));
 317        BUG_ON(!list_empty(&fl->fl_block));
 318        BUG_ON(!hlist_unhashed(&fl->fl_link));
 319
 320        locks_release_private(fl);
 321        kmem_cache_free(filelock_cache, fl);
 322}
 323EXPORT_SYMBOL(locks_free_lock);
 324
 325static void
 326locks_dispose_list(struct list_head *dispose)
 327{
 328        struct file_lock *fl;
 329
 330        while (!list_empty(dispose)) {
 331                fl = list_first_entry(dispose, struct file_lock, fl_list);
 332                list_del_init(&fl->fl_list);
 333                locks_free_lock(fl);
 334        }
 335}
 336
 337void locks_init_lock(struct file_lock *fl)
 338{
 339        memset(fl, 0, sizeof(struct file_lock));
 340        locks_init_lock_heads(fl);
 341}
 342
 343EXPORT_SYMBOL(locks_init_lock);
 344
 345/*
 346 * Initialize a new lock from an existing file_lock structure.
 347 */
 348void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
 349{
 350        new->fl_owner = fl->fl_owner;
 351        new->fl_pid = fl->fl_pid;
 352        new->fl_file = NULL;
 353        new->fl_flags = fl->fl_flags;
 354        new->fl_type = fl->fl_type;
 355        new->fl_start = fl->fl_start;
 356        new->fl_end = fl->fl_end;
 357        new->fl_lmops = fl->fl_lmops;
 358        new->fl_ops = NULL;
 359
 360        if (fl->fl_lmops) {
 361                if (fl->fl_lmops->lm_get_owner)
 362                        fl->fl_lmops->lm_get_owner(fl->fl_owner);
 363        }
 364}
 365EXPORT_SYMBOL(locks_copy_conflock);
 366
 367void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 368{
 369        /* "new" must be a freshly-initialized lock */
 370        WARN_ON_ONCE(new->fl_ops);
 371
 372        locks_copy_conflock(new, fl);
 373
 374        new->fl_file = fl->fl_file;
 375        new->fl_ops = fl->fl_ops;
 376
 377        if (fl->fl_ops) {
 378                if (fl->fl_ops->fl_copy_lock)
 379                        fl->fl_ops->fl_copy_lock(new, fl);
 380        }
 381}
 382
 383EXPORT_SYMBOL(locks_copy_lock);
 384
 385static inline int flock_translate_cmd(int cmd) {
 386        if (cmd & LOCK_MAND)
 387                return cmd & (LOCK_MAND | LOCK_RW);
 388        switch (cmd) {
 389        case LOCK_SH:
 390                return F_RDLCK;
 391        case LOCK_EX:
 392                return F_WRLCK;
 393        case LOCK_UN:
 394                return F_UNLCK;
 395        }
 396        return -EINVAL;
 397}
 398
 399/* Fill in a file_lock structure with an appropriate FLOCK lock. */
 400static struct file_lock *
 401flock_make_lock(struct file *filp, unsigned int cmd)
 402{
 403        struct file_lock *fl;
 404        int type = flock_translate_cmd(cmd);
 405
 406        if (type < 0)
 407                return ERR_PTR(type);
 408        
 409        fl = locks_alloc_lock();
 410        if (fl == NULL)
 411                return ERR_PTR(-ENOMEM);
 412
 413        fl->fl_file = filp;
 414        fl->fl_owner = filp;
 415        fl->fl_pid = current->tgid;
 416        fl->fl_flags = FL_FLOCK;
 417        fl->fl_type = type;
 418        fl->fl_end = OFFSET_MAX;
 419        
 420        return fl;
 421}
 422
 423static int assign_type(struct file_lock *fl, long type)
 424{
 425        switch (type) {
 426        case F_RDLCK:
 427        case F_WRLCK:
 428        case F_UNLCK:
 429                fl->fl_type = type;
 430                break;
 431        default:
 432                return -EINVAL;
 433        }
 434        return 0;
 435}
 436
 437static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 438                                 struct flock64 *l)
 439{
 440        switch (l->l_whence) {
 441        case SEEK_SET:
 442                fl->fl_start = 0;
 443                break;
 444        case SEEK_CUR:
 445                fl->fl_start = filp->f_pos;
 446                break;
 447        case SEEK_END:
 448                fl->fl_start = i_size_read(file_inode(filp));
 449                break;
 450        default:
 451                return -EINVAL;
 452        }
 453        if (l->l_start > OFFSET_MAX - fl->fl_start)
 454                return -EOVERFLOW;
 455        fl->fl_start += l->l_start;
 456        if (fl->fl_start < 0)
 457                return -EINVAL;
 458
 459        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
 460           POSIX-2001 defines it. */
 461        if (l->l_len > 0) {
 462                if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
 463                        return -EOVERFLOW;
 464                fl->fl_end = fl->fl_start + l->l_len - 1;
 465
 466        } else if (l->l_len < 0) {
 467                if (fl->fl_start + l->l_len < 0)
 468                        return -EINVAL;
 469                fl->fl_end = fl->fl_start - 1;
 470                fl->fl_start += l->l_len;
 471        } else
 472                fl->fl_end = OFFSET_MAX;
 473
 474        fl->fl_owner = current->files;
 475        fl->fl_pid = current->tgid;
 476        fl->fl_file = filp;
 477        fl->fl_flags = FL_POSIX;
 478        fl->fl_ops = NULL;
 479        fl->fl_lmops = NULL;
 480
 481        return assign_type(fl, l->l_type);
 482}
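/*
 * Worked example of the mapping above (illustrative only): with
 * l_whence == SEEK_SET and l_start == 100,
 *
 *	l_len == 10	->	fl_start = 100, fl_end = 109
 *	l_len == 0	->	fl_start = 100, fl_end = OFFSET_MAX (to EOF)
 *	l_len == -10	->	fl_start = 90,  fl_end = 99
 *
 * i.e. a negative l_len (permitted by POSIX-2001) locks the bytes
 * immediately preceding the computed start offset.
 */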
 483
 484/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 485 * style lock.
 486 */
 487static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 488                               struct flock *l)
 489{
 490        struct flock64 ll = {
 491                .l_type = l->l_type,
 492                .l_whence = l->l_whence,
 493                .l_start = l->l_start,
 494                .l_len = l->l_len,
 495        };
 496
 497        return flock64_to_posix_lock(filp, fl, &ll);
 498}
 499
 500/* default lease lock manager operations */
 501static bool
 502lease_break_callback(struct file_lock *fl)
 503{
 504        kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 505        return false;
 506}
 507
 508static void
 509lease_setup(struct file_lock *fl, void **priv)
 510{
 511        struct file *filp = fl->fl_file;
 512        struct fasync_struct *fa = *priv;
 513
 514        /*
 515         * fasync_insert_entry() returns the old entry if any. If there was no
 516         * old entry, then it used "priv" and inserted it into the fasync list.
 517         * Clear the pointer to indicate that it shouldn't be freed.
 518         */
 519        if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
 520                *priv = NULL;
 521
 522        __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 523}
 524
 525static const struct lock_manager_operations lease_manager_ops = {
 526        .lm_break = lease_break_callback,
 527        .lm_change = lease_modify,
 528        .lm_setup = lease_setup,
 529};
 530
 531/*
 532 * Initialize a lease, use the default lock manager operations
 533 */
 534static int lease_init(struct file *filp, long type, struct file_lock *fl)
  535{
 536        if (assign_type(fl, type) != 0)
 537                return -EINVAL;
 538
 539        fl->fl_owner = filp;
 540        fl->fl_pid = current->tgid;
 541
 542        fl->fl_file = filp;
 543        fl->fl_flags = FL_LEASE;
 544        fl->fl_start = 0;
 545        fl->fl_end = OFFSET_MAX;
 546        fl->fl_ops = NULL;
 547        fl->fl_lmops = &lease_manager_ops;
 548        return 0;
 549}
 550
 551/* Allocate a file_lock initialised to this type of lease */
 552static struct file_lock *lease_alloc(struct file *filp, long type)
 553{
 554        struct file_lock *fl = locks_alloc_lock();
 555        int error = -ENOMEM;
 556
 557        if (fl == NULL)
 558                return ERR_PTR(error);
 559
 560        error = lease_init(filp, type, fl);
 561        if (error) {
 562                locks_free_lock(fl);
 563                return ERR_PTR(error);
 564        }
 565        return fl;
 566}
 567
 568/* Check if two locks overlap each other.
 569 */
 570static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
 571{
 572        return ((fl1->fl_end >= fl2->fl_start) &&
 573                (fl2->fl_end >= fl1->fl_start));
 574}
 575
 576/*
 577 * Check whether two locks have the same owner.
 578 */
 579static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 580{
 581        if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
 582                return fl2->fl_lmops == fl1->fl_lmops &&
 583                        fl1->fl_lmops->lm_compare_owner(fl1, fl2);
 584        return fl1->fl_owner == fl2->fl_owner;
 585}
 586
 587/* Must be called with the flc_lock held! */
 588static void locks_insert_global_locks(struct file_lock *fl)
 589{
 590        lg_local_lock(&file_lock_lglock);
 591        fl->fl_link_cpu = smp_processor_id();
 592        hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
 593        lg_local_unlock(&file_lock_lglock);
 594}
 595
 596/* Must be called with the flc_lock held! */
 597static void locks_delete_global_locks(struct file_lock *fl)
 598{
 599        /*
 600         * Avoid taking lock if already unhashed. This is safe since this check
 601         * is done while holding the flc_lock, and new insertions into the list
 602         * also require that it be held.
 603         */
 604        if (hlist_unhashed(&fl->fl_link))
 605                return;
 606        lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 607        hlist_del_init(&fl->fl_link);
 608        lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 609}
 610
 611static unsigned long
 612posix_owner_key(struct file_lock *fl)
 613{
 614        if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
 615                return fl->fl_lmops->lm_owner_key(fl);
 616        return (unsigned long)fl->fl_owner;
 617}
 618
 619static void locks_insert_global_blocked(struct file_lock *waiter)
 620{
 621        lockdep_assert_held(&blocked_lock_lock);
 622
 623        hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
 624}
 625
 626static void locks_delete_global_blocked(struct file_lock *waiter)
 627{
 628        lockdep_assert_held(&blocked_lock_lock);
 629
 630        hash_del(&waiter->fl_link);
 631}
 632
 633/* Remove waiter from blocker's block list.
 634 * When blocker ends up pointing to itself then the list is empty.
 635 *
 636 * Must be called with blocked_lock_lock held.
 637 */
 638static void __locks_delete_block(struct file_lock *waiter)
 639{
 640        locks_delete_global_blocked(waiter);
 641        list_del_init(&waiter->fl_block);
 642        waiter->fl_next = NULL;
 643}
 644
 645static void locks_delete_block(struct file_lock *waiter)
 646{
 647        spin_lock(&blocked_lock_lock);
 648        __locks_delete_block(waiter);
 649        spin_unlock(&blocked_lock_lock);
 650}
 651
 652/* Insert waiter into blocker's block list.
 653 * We use a circular list so that processes can be easily woken up in
 654 * the order they blocked. The documentation doesn't require this but
 655 * it seems like the reasonable thing to do.
 656 *
 657 * Must be called with both the flc_lock and blocked_lock_lock held. The
 658 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 659 * that the flc_lock is also held on insertions we can avoid taking the
 660 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 661 */
 662static void __locks_insert_block(struct file_lock *blocker,
 663                                        struct file_lock *waiter)
 664{
 665        BUG_ON(!list_empty(&waiter->fl_block));
 666        waiter->fl_next = blocker;
 667        list_add_tail(&waiter->fl_block, &blocker->fl_block);
 668        if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
 669                locks_insert_global_blocked(waiter);
 670}
 671
 672/* Must be called with flc_lock held. */
 673static void locks_insert_block(struct file_lock *blocker,
 674                                        struct file_lock *waiter)
 675{
 676        spin_lock(&blocked_lock_lock);
 677        __locks_insert_block(blocker, waiter);
 678        spin_unlock(&blocked_lock_lock);
 679}
 680
 681/*
 682 * Wake up processes blocked waiting for blocker.
 683 *
 684 * Must be called with the inode->flc_lock held!
 685 */
 686static void locks_wake_up_blocks(struct file_lock *blocker)
 687{
 688        /*
 689         * Avoid taking global lock if list is empty. This is safe since new
 690         * blocked requests are only added to the list under the flc_lock, and
 691         * the flc_lock is always held here. Note that removal from the fl_block
 692         * list does not require the flc_lock, so we must recheck list_empty()
 693         * after acquiring the blocked_lock_lock.
 694         */
 695        if (list_empty(&blocker->fl_block))
 696                return;
 697
 698        spin_lock(&blocked_lock_lock);
 699        while (!list_empty(&blocker->fl_block)) {
 700                struct file_lock *waiter;
 701
 702                waiter = list_first_entry(&blocker->fl_block,
 703                                struct file_lock, fl_block);
 704                __locks_delete_block(waiter);
 705                if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 706                        waiter->fl_lmops->lm_notify(waiter);
 707                else
 708                        wake_up(&waiter->fl_wait);
 709        }
 710        spin_unlock(&blocked_lock_lock);
 711}
 712
 713static void
 714locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
 715{
 716        fl->fl_nspid = get_pid(task_tgid(current));
 717        list_add_tail(&fl->fl_list, before);
 718        locks_insert_global_locks(fl);
 719}
 720
 721static void
 722locks_unlink_lock_ctx(struct file_lock *fl)
 723{
 724        locks_delete_global_locks(fl);
 725        list_del_init(&fl->fl_list);
 726        if (fl->fl_nspid) {
 727                put_pid(fl->fl_nspid);
 728                fl->fl_nspid = NULL;
 729        }
 730        locks_wake_up_blocks(fl);
 731}
 732
 733static void
 734locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
 735{
 736        locks_unlink_lock_ctx(fl);
 737        if (dispose)
 738                list_add(&fl->fl_list, dispose);
 739        else
 740                locks_free_lock(fl);
 741}
 742
 743/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 744 * checks for shared/exclusive status of overlapping locks.
 745 */
 746static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 747{
 748        if (sys_fl->fl_type == F_WRLCK)
 749                return 1;
 750        if (caller_fl->fl_type == F_WRLCK)
 751                return 1;
 752        return 0;
 753}
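/*
 * Put differently: two F_RDLCK locks are the only compatible combination
 * here; any pairing that involves an F_WRLCK conflicts.
 */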
 754
 755/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
  756 * checking before calling locks_conflict().
 757 */
 758static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 759{
 760        /* POSIX locks owned by the same process do not conflict with
 761         * each other.
 762         */
 763        if (posix_same_owner(caller_fl, sys_fl))
 764                return (0);
 765
 766        /* Check whether they overlap */
 767        if (!locks_overlap(caller_fl, sys_fl))
 768                return 0;
 769
 770        return (locks_conflict(caller_fl, sys_fl));
 771}
 772
 773/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
  774 * checking before calling locks_conflict().
 775 */
 776static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 777{
 778        /* FLOCK locks referring to the same filp do not conflict with
 779         * each other.
 780         */
 781        if (caller_fl->fl_file == sys_fl->fl_file)
 782                return (0);
 783        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
 784                return 0;
 785
 786        return (locks_conflict(caller_fl, sys_fl));
 787}
 788
 789void
 790posix_test_lock(struct file *filp, struct file_lock *fl)
 791{
 792        struct file_lock *cfl;
 793        struct file_lock_context *ctx;
 794        struct inode *inode = file_inode(filp);
 795
 796        ctx = smp_load_acquire(&inode->i_flctx);
 797        if (!ctx || list_empty_careful(&ctx->flc_posix)) {
 798                fl->fl_type = F_UNLCK;
 799                return;
 800        }
 801
 802        spin_lock(&ctx->flc_lock);
 803        list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
 804                if (posix_locks_conflict(fl, cfl)) {
 805                        locks_copy_conflock(fl, cfl);
 806                        if (cfl->fl_nspid)
 807                                fl->fl_pid = pid_vnr(cfl->fl_nspid);
 808                        goto out;
 809                }
 810        }
 811        fl->fl_type = F_UNLCK;
 812out:
 813        spin_unlock(&ctx->flc_lock);
 814        return;
 815}
 816EXPORT_SYMBOL(posix_test_lock);
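/*
 * Illustrative userspace view of the test above (a sketch, not part of this
 * file): posix_test_lock() is the generic backend behind fcntl(F_GETLK).
 * Assumes a hypothetical descriptor fd; needs <fcntl.h>.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// "could I take a write lock?"
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "to end of file"
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK) {
 *		// fl now describes one conflicting lock, including l_pid
 *	}
 */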
 817
 818/*
 819 * Deadlock detection:
 820 *
 821 * We attempt to detect deadlocks that are due purely to posix file
 822 * locks.
 823 *
 824 * We assume that a task can be waiting for at most one lock at a time.
 825 * So for any acquired lock, the process holding that lock may be
 826 * waiting on at most one other lock.  That lock in turns may be held by
 827 * someone waiting for at most one other lock.  Given a requested lock
 828 * caller_fl which is about to wait for a conflicting lock block_fl, we
 829 * follow this chain of waiters to ensure we are not about to create a
 830 * cycle.
 831 *
 832 * Since we do this before we ever put a process to sleep on a lock, we
 833 * are ensured that there is never a cycle; that is what guarantees that
 834 * the while() loop in posix_locks_deadlock() eventually completes.
 835 *
 836 * Note: the above assumption may not be true when handling lock
 837 * requests from a broken NFS client. It may also fail in the presence
 838 * of tasks (such as posix threads) sharing the same open file table.
 839 * To handle those cases, we just bail out after a few iterations.
 840 *
 841 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 842 * Because the owner is not even nominally tied to a thread of
 843 * execution, the deadlock detection below can't reasonably work well. Just
 844 * skip it for those.
 845 *
 846 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 847 * locks that just checks for the case where two tasks are attempting to
 848 * upgrade from read to write locks on the same inode.
 849 */
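/*
 * Concrete example of the cycle walk (illustrative): task A holds lock LA
 * and is blocked waiting on lock LB held by task B, so the blocked_hash
 * contains A's pending request with fl_next == LB. If B now requests a lock
 * that conflicts with LA, posix_locks_deadlock(B's request, LA) calls
 * what_owner_is_waiting_for(LA), which finds A's pending request and returns
 * LB; LB is owned by the caller (B), so a deadlock is reported.
 */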
 850
 851#define MAX_DEADLK_ITERATIONS 10
 852
 853/* Find a lock that the owner of the given block_fl is blocking on. */
 854static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
 855{
 856        struct file_lock *fl;
 857
 858        hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
 859                if (posix_same_owner(fl, block_fl))
 860                        return fl->fl_next;
 861        }
 862        return NULL;
 863}
 864
 865/* Must be called with the blocked_lock_lock held! */
 866static int posix_locks_deadlock(struct file_lock *caller_fl,
 867                                struct file_lock *block_fl)
 868{
 869        int i = 0;
 870
 871        lockdep_assert_held(&blocked_lock_lock);
 872
 873        /*
 874         * This deadlock detector can't reasonably detect deadlocks with
 875         * FL_OFDLCK locks, since they aren't owned by a process, per-se.
 876         */
 877        if (IS_OFDLCK(caller_fl))
 878                return 0;
 879
 880        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
 881                if (i++ > MAX_DEADLK_ITERATIONS)
 882                        return 0;
 883                if (posix_same_owner(caller_fl, block_fl))
 884                        return 1;
 885        }
 886        return 0;
 887}
 888
 889/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 890 * after any leases, but before any posix locks.
 891 *
 892 * Note that if called with an FL_EXISTS argument, the caller may determine
 893 * whether or not a lock was successfully freed by testing the return
 894 * value for -ENOENT.
 895 */
 896static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 897{
 898        struct file_lock *new_fl = NULL;
 899        struct file_lock *fl;
 900        struct file_lock_context *ctx;
 901        int error = 0;
 902        bool found = false;
 903        LIST_HEAD(dispose);
 904
 905        ctx = locks_get_lock_context(inode, request->fl_type);
 906        if (!ctx) {
 907                if (request->fl_type != F_UNLCK)
 908                        return -ENOMEM;
 909                return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
 910        }
 911
 912        if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
 913                new_fl = locks_alloc_lock();
 914                if (!new_fl)
 915                        return -ENOMEM;
 916        }
 917
 918        spin_lock(&ctx->flc_lock);
 919        if (request->fl_flags & FL_ACCESS)
 920                goto find_conflict;
 921
 922        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
 923                if (request->fl_file != fl->fl_file)
 924                        continue;
 925                if (request->fl_type == fl->fl_type)
 926                        goto out;
 927                found = true;
 928                locks_delete_lock_ctx(fl, &dispose);
 929                break;
 930        }
 931
 932        if (request->fl_type == F_UNLCK) {
 933                if ((request->fl_flags & FL_EXISTS) && !found)
 934                        error = -ENOENT;
 935                goto out;
 936        }
 937
 938find_conflict:
 939        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
 940                if (!flock_locks_conflict(request, fl))
 941                        continue;
 942                error = -EAGAIN;
 943                if (!(request->fl_flags & FL_SLEEP))
 944                        goto out;
 945                error = FILE_LOCK_DEFERRED;
 946                locks_insert_block(fl, request);
 947                goto out;
 948        }
 949        if (request->fl_flags & FL_ACCESS)
 950                goto out;
 951        locks_copy_lock(new_fl, request);
 952        locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
 953        new_fl = NULL;
 954        error = 0;
 955
 956out:
 957        spin_unlock(&ctx->flc_lock);
 958        if (new_fl)
 959                locks_free_lock(new_fl);
 960        locks_dispose_list(&dispose);
 961        return error;
 962}
 963
 964static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 965                            struct file_lock *conflock)
 966{
 967        struct file_lock *fl, *tmp;
 968        struct file_lock *new_fl = NULL;
 969        struct file_lock *new_fl2 = NULL;
 970        struct file_lock *left = NULL;
 971        struct file_lock *right = NULL;
 972        struct file_lock_context *ctx;
 973        int error;
 974        bool added = false;
 975        LIST_HEAD(dispose);
 976
 977        ctx = locks_get_lock_context(inode, request->fl_type);
 978        if (!ctx)
 979                return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
 980
 981        /*
 982         * We may need two file_lock structures for this operation,
 983         * so we get them in advance to avoid races.
 984         *
  985         * In some cases we can be sure that no new locks will be needed
 986         */
 987        if (!(request->fl_flags & FL_ACCESS) &&
 988            (request->fl_type != F_UNLCK ||
 989             request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
 990                new_fl = locks_alloc_lock();
 991                new_fl2 = locks_alloc_lock();
 992        }
 993
 994        spin_lock(&ctx->flc_lock);
 995        /*
 996         * New lock request. Walk all POSIX locks and look for conflicts. If
 997         * there are any, either return error or put the request on the
 998         * blocker's list of waiters and the global blocked_hash.
 999         */
1000        if (request->fl_type != F_UNLCK) {
1001                list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1002                        if (!posix_locks_conflict(request, fl))
1003                                continue;
1004                        if (conflock)
1005                                locks_copy_conflock(conflock, fl);
1006                        error = -EAGAIN;
1007                        if (!(request->fl_flags & FL_SLEEP))
1008                                goto out;
1009                        /*
1010                         * Deadlock detection and insertion into the blocked
1011                         * locks list must be done while holding the same lock!
1012                         */
1013                        error = -EDEADLK;
1014                        spin_lock(&blocked_lock_lock);
1015                        if (likely(!posix_locks_deadlock(request, fl))) {
1016                                error = FILE_LOCK_DEFERRED;
1017                                __locks_insert_block(fl, request);
1018                        }
1019                        spin_unlock(&blocked_lock_lock);
1020                        goto out;
1021                }
1022        }
1023
1024        /* If we're just looking for a conflict, we're done. */
1025        error = 0;
1026        if (request->fl_flags & FL_ACCESS)
1027                goto out;
1028
1029        /* Find the first old lock with the same owner as the new lock */
1030        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1031                if (posix_same_owner(request, fl))
1032                        break;
1033        }
1034
1035        /* Process locks with this owner. */
1036        list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1037                if (!posix_same_owner(request, fl))
1038                        break;
1039
1040                /* Detect adjacent or overlapping regions (if same lock type) */
1041                if (request->fl_type == fl->fl_type) {
1042                        /* In all comparisons of start vs end, use
1043                         * "start - 1" rather than "end + 1". If end
1044                         * is OFFSET_MAX, end + 1 will become negative.
1045                         */
1046                        if (fl->fl_end < request->fl_start - 1)
1047                                continue;
1048                        /* If the next lock in the list has entirely bigger
1049                         * addresses than the new one, insert the lock here.
1050                         */
1051                        if (fl->fl_start - 1 > request->fl_end)
1052                                break;
1053
1054                        /* If we come here, the new and old lock are of the
1055                         * same type and adjacent or overlapping. Make one
1056                         * lock yielding from the lower start address of both
1057                         * locks to the higher end address.
1058                         */
1059                        if (fl->fl_start > request->fl_start)
1060                                fl->fl_start = request->fl_start;
1061                        else
1062                                request->fl_start = fl->fl_start;
1063                        if (fl->fl_end < request->fl_end)
1064                                fl->fl_end = request->fl_end;
1065                        else
1066                                request->fl_end = fl->fl_end;
1067                        if (added) {
1068                                locks_delete_lock_ctx(fl, &dispose);
1069                                continue;
1070                        }
1071                        request = fl;
1072                        added = true;
1073                } else {
1074                        /* Processing for different lock types is a bit
1075                         * more complex.
1076                         */
1077                        if (fl->fl_end < request->fl_start)
1078                                continue;
1079                        if (fl->fl_start > request->fl_end)
1080                                break;
1081                        if (request->fl_type == F_UNLCK)
1082                                added = true;
1083                        if (fl->fl_start < request->fl_start)
1084                                left = fl;
1085                        /* If the next lock in the list has a higher end
1086                         * address than the new one, insert the new one here.
1087                         */
1088                        if (fl->fl_end > request->fl_end) {
1089                                right = fl;
1090                                break;
1091                        }
1092                        if (fl->fl_start >= request->fl_start) {
1093                                /* The new lock completely replaces an old
1094                                 * one (This may happen several times).
1095                                 */
1096                                if (added) {
1097                                        locks_delete_lock_ctx(fl, &dispose);
1098                                        continue;
1099                                }
1100                                /*
1101                                 * Replace the old lock with new_fl, and
1102                                 * remove the old one. It's safe to do the
1103                                 * insert here since we know that we won't be
1104                                 * using new_fl later, and that the lock is
1105                                 * just replacing an existing lock.
1106                                 */
1107                                error = -ENOLCK;
1108                                if (!new_fl)
1109                                        goto out;
1110                                locks_copy_lock(new_fl, request);
1111                                request = new_fl;
1112                                new_fl = NULL;
1113                                locks_insert_lock_ctx(request, &fl->fl_list);
1114                                locks_delete_lock_ctx(fl, &dispose);
1115                                added = true;
1116                        }
1117                }
1118        }
1119
1120        /*
1121         * The above code only modifies existing locks in case of merging or
1122         * replacing. If new lock(s) need to be inserted all modifications are
1123         * done below this, so it's safe yet to bail out.
1124         */
1125        error = -ENOLCK; /* "no luck" */
1126        if (right && left == right && !new_fl2)
1127                goto out;
1128
1129        error = 0;
1130        if (!added) {
1131                if (request->fl_type == F_UNLCK) {
1132                        if (request->fl_flags & FL_EXISTS)
1133                                error = -ENOENT;
1134                        goto out;
1135                }
1136
1137                if (!new_fl) {
1138                        error = -ENOLCK;
1139                        goto out;
1140                }
1141                locks_copy_lock(new_fl, request);
1142                locks_insert_lock_ctx(new_fl, &fl->fl_list);
1143                fl = new_fl;
1144                new_fl = NULL;
1145        }
1146        if (right) {
1147                if (left == right) {
1148                        /* The new lock breaks the old one in two pieces,
1149                         * so we have to use the second new lock.
1150                         */
1151                        left = new_fl2;
1152                        new_fl2 = NULL;
1153                        locks_copy_lock(left, right);
1154                        locks_insert_lock_ctx(left, &fl->fl_list);
1155                }
1156                right->fl_start = request->fl_end + 1;
1157                locks_wake_up_blocks(right);
1158        }
1159        if (left) {
1160                left->fl_end = request->fl_start - 1;
1161                locks_wake_up_blocks(left);
1162        }
1163 out:
1164        spin_unlock(&ctx->flc_lock);
1165        /*
1166         * Free any unused locks.
1167         */
1168        if (new_fl)
1169                locks_free_lock(new_fl);
1170        if (new_fl2)
1171                locks_free_lock(new_fl2);
1172        locks_dispose_list(&dispose);
1173        trace_posix_lock_inode(inode, request, error);
1174
1175        return error;
1176}
1177
1178/**
1179 * posix_lock_file - Apply a POSIX-style lock to a file
1180 * @filp: The file to apply the lock to
1181 * @fl: The lock to be applied
1182 * @conflock: Place to return a copy of the conflicting lock, if found.
1183 *
1184 * Add a POSIX style lock to a file.
1185 * We merge adjacent & overlapping locks whenever possible.
1186 * POSIX locks are sorted by owner task, then by starting address
1187 *
1188 * Note that if called with an FL_EXISTS argument, the caller may determine
1189 * whether or not a lock was successfully freed by testing the return
1190 * value for -ENOENT.
1191 */
1192int posix_lock_file(struct file *filp, struct file_lock *fl,
1193                        struct file_lock *conflock)
1194{
1195        return posix_lock_inode(file_inode(filp), fl, conflock);
1196}
1197EXPORT_SYMBOL(posix_lock_file);
1198
1199/**
1200 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1201 * @inode: inode of file to which lock request should be applied
1202 * @fl: The lock to be applied
1203 *
1204 * Apply a POSIX style lock request to an inode.
1205 */
1206static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1207{
1208        int error;
 1209        might_sleep();
1210        for (;;) {
1211                error = posix_lock_inode(inode, fl, NULL);
1212                if (error != FILE_LOCK_DEFERRED)
1213                        break;
1214                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1215                if (!error)
1216                        continue;
1217
1218                locks_delete_block(fl);
1219                break;
1220        }
1221        return error;
1222}
1223
1224#ifdef CONFIG_MANDATORY_FILE_LOCKING
1225/**
1226 * locks_mandatory_locked - Check for an active lock
1227 * @file: the file to check
1228 *
1229 * Searches the inode's list of locks to find any POSIX locks which conflict.
1230 * This function is called from locks_verify_locked() only.
1231 */
1232int locks_mandatory_locked(struct file *file)
1233{
1234        int ret;
1235        struct inode *inode = file_inode(file);
1236        struct file_lock_context *ctx;
1237        struct file_lock *fl;
1238
1239        ctx = smp_load_acquire(&inode->i_flctx);
1240        if (!ctx || list_empty_careful(&ctx->flc_posix))
1241                return 0;
1242
1243        /*
1244         * Search the lock list for this inode for any POSIX locks.
1245         */
1246        spin_lock(&ctx->flc_lock);
1247        ret = 0;
1248        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1249                if (fl->fl_owner != current->files &&
1250                    fl->fl_owner != file) {
1251                        ret = -EAGAIN;
1252                        break;
1253                }
1254        }
1255        spin_unlock(&ctx->flc_lock);
1256        return ret;
1257}
1258
1259/**
1260 * locks_mandatory_area - Check for a conflicting lock
1261 * @inode:      the file to check
1262 * @filp:       how the file was opened (if it was)
1263 * @start:      first byte in the file to check
 1264 * @end:        last byte in the file to check
1265 * @type:       %F_WRLCK for a write lock, else %F_RDLCK
1266 *
1267 * Searches the inode's list of locks to find any POSIX locks which conflict.
1268 */
1269int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
1270                         loff_t end, unsigned char type)
1271{
1272        struct file_lock fl;
1273        int error;
1274        bool sleep = false;
1275
1276        locks_init_lock(&fl);
1277        fl.fl_pid = current->tgid;
1278        fl.fl_file = filp;
1279        fl.fl_flags = FL_POSIX | FL_ACCESS;
1280        if (filp && !(filp->f_flags & O_NONBLOCK))
1281                sleep = true;
1282        fl.fl_type = type;
1283        fl.fl_start = start;
1284        fl.fl_end = end;
1285
1286        for (;;) {
1287                if (filp) {
1288                        fl.fl_owner = filp;
1289                        fl.fl_flags &= ~FL_SLEEP;
1290                        error = posix_lock_inode(inode, &fl, NULL);
1291                        if (!error)
1292                                break;
1293                }
1294
1295                if (sleep)
1296                        fl.fl_flags |= FL_SLEEP;
1297                fl.fl_owner = current->files;
1298                error = posix_lock_inode(inode, &fl, NULL);
1299                if (error != FILE_LOCK_DEFERRED)
1300                        break;
1301                error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1302                if (!error) {
1303                        /*
1304                         * If we've been sleeping someone might have
1305                         * changed the permissions behind our back.
1306                         */
1307                        if (__mandatory_lock(inode))
1308                                continue;
1309                }
1310
1311                locks_delete_block(&fl);
1312                break;
1313        }
1314
1315        return error;
1316}
1317
1318EXPORT_SYMBOL(locks_mandatory_area);
1319#endif /* CONFIG_MANDATORY_FILE_LOCKING */
1320
1321static void lease_clear_pending(struct file_lock *fl, int arg)
1322{
1323        switch (arg) {
1324        case F_UNLCK:
1325                fl->fl_flags &= ~FL_UNLOCK_PENDING;
1326                /* fall through: */
1327        case F_RDLCK:
1328                fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1329        }
1330}
1331
1332/* We already had a lease on this file; just change its type */
1333int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1334{
1335        int error = assign_type(fl, arg);
1336
1337        if (error)
1338                return error;
1339        lease_clear_pending(fl, arg);
1340        locks_wake_up_blocks(fl);
1341        if (arg == F_UNLCK) {
1342                struct file *filp = fl->fl_file;
1343
1344                f_delown(filp);
1345                filp->f_owner.signum = 0;
1346                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1347                if (fl->fl_fasync != NULL) {
1348                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1349                        fl->fl_fasync = NULL;
1350                }
1351                locks_delete_lock_ctx(fl, dispose);
1352        }
1353        return 0;
1354}
1355EXPORT_SYMBOL(lease_modify);
1356
1357static bool past_time(unsigned long then)
1358{
1359        if (!then)
1360                /* 0 is a special value meaning "this never expires": */
1361                return false;
1362        return time_after(jiffies, then);
1363}
1364
1365static void time_out_leases(struct inode *inode, struct list_head *dispose)
1366{
1367        struct file_lock_context *ctx = inode->i_flctx;
1368        struct file_lock *fl, *tmp;
1369
1370        lockdep_assert_held(&ctx->flc_lock);
1371
1372        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1373                trace_time_out_leases(inode, fl);
1374                if (past_time(fl->fl_downgrade_time))
1375                        lease_modify(fl, F_RDLCK, dispose);
1376                if (past_time(fl->fl_break_time))
1377                        lease_modify(fl, F_UNLCK, dispose);
1378        }
1379}
1380
1381static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1382{
1383        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1384                return false;
1385        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1386                return false;
1387        return locks_conflict(breaker, lease);
1388}
1389
1390static bool
1391any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1392{
1393        struct file_lock_context *ctx = inode->i_flctx;
1394        struct file_lock *fl;
1395
1396        lockdep_assert_held(&ctx->flc_lock);
1397
1398        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1399                if (leases_conflict(fl, breaker))
1400                        return true;
1401        }
1402        return false;
1403}
1404
1405/**
1406 *      __break_lease   -       revoke all outstanding leases on file
1407 *      @inode: the inode of the file to return
1408 *      @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1409 *          break all leases
1410 *      @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1411 *          only delegations
1412 *
1413 *      break_lease (inlined for speed) has checked there already is at least
1414 *      some kind of lock (maybe a lease) on this file.  Leases are broken on
1415 *      a call to open() or truncate().  This function can sleep unless you
1416 *      specified %O_NONBLOCK to your open().
1417 */
1418int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1419{
1420        int error = 0;
1421        struct file_lock_context *ctx;
1422        struct file_lock *new_fl, *fl, *tmp;
1423        unsigned long break_time;
1424        int want_write = (mode & O_ACCMODE) != O_RDONLY;
1425        LIST_HEAD(dispose);
1426
1427        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1428        if (IS_ERR(new_fl))
1429                return PTR_ERR(new_fl);
1430        new_fl->fl_flags = type;
1431
1432        /* typically we will check that ctx is non-NULL before calling */
1433        ctx = smp_load_acquire(&inode->i_flctx);
1434        if (!ctx) {
1435                WARN_ON_ONCE(1);
1436                return error;
1437        }
1438
1439        spin_lock(&ctx->flc_lock);
1440
1441        time_out_leases(inode, &dispose);
1442
1443        if (!any_leases_conflict(inode, new_fl))
1444                goto out;
1445
1446        break_time = 0;
1447        if (lease_break_time > 0) {
1448                break_time = jiffies + lease_break_time * HZ;
1449                if (break_time == 0)
1450                        break_time++;   /* so that 0 means no break time */
1451        }
1452
1453        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1454                if (!leases_conflict(fl, new_fl))
1455                        continue;
1456                if (want_write) {
1457                        if (fl->fl_flags & FL_UNLOCK_PENDING)
1458                                continue;
1459                        fl->fl_flags |= FL_UNLOCK_PENDING;
1460                        fl->fl_break_time = break_time;
1461                } else {
1462                        if (lease_breaking(fl))
1463                                continue;
1464                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
1465                        fl->fl_downgrade_time = break_time;
1466                }
1467                if (fl->fl_lmops->lm_break(fl))
1468                        locks_delete_lock_ctx(fl, &dispose);
1469        }
1470
1471        if (list_empty(&ctx->flc_lease))
1472                goto out;
1473
1474        if (mode & O_NONBLOCK) {
1475                trace_break_lease_noblock(inode, new_fl);
1476                error = -EWOULDBLOCK;
1477                goto out;
1478        }
1479
1480restart:
1481        fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1482        break_time = fl->fl_break_time;
1483        if (break_time != 0)
1484                break_time -= jiffies;
1485        if (break_time == 0)
1486                break_time++;
1487        locks_insert_block(fl, new_fl);
1488        trace_break_lease_block(inode, new_fl);
1489        spin_unlock(&ctx->flc_lock);
1490        locks_dispose_list(&dispose);
1491        error = wait_event_interruptible_timeout(new_fl->fl_wait,
1492                                                !new_fl->fl_next, break_time);
1493        spin_lock(&ctx->flc_lock);
1494        trace_break_lease_unblock(inode, new_fl);
1495        locks_delete_block(new_fl);
1496        if (error >= 0) {
1497                /*
1498                 * Wait for the next conflicting lease that has not been
1499                 * broken yet
1500                 */
1501                if (error == 0)
1502                        time_out_leases(inode, &dispose);
1503                if (any_leases_conflict(inode, new_fl))
1504                        goto restart;
1505                error = 0;
1506        }
1507out:
1508        spin_unlock(&ctx->flc_lock);
1509        locks_dispose_list(&dispose);
1510        locks_free_lock(new_fl);
1511        return error;
1512}
1513
1514EXPORT_SYMBOL(__break_lease);
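
/*
 * Illustrative userspace sketch (not part of this file): the lease break
 * above is triggered simply by an open() or truncate() on a leased file.
 * Assuming "path" names a file on which another process holds a lease, a
 * breaker that does not want to wait out the lease_break_time grace period
 * might do:
 *
 *	int fd = open(path, O_RDWR | O_NONBLOCK);
 *	if (fd < 0 && errno == EWOULDBLOCK)
 *		return -1;	// break has been initiated; retry later
 *
 * Without O_NONBLOCK the open() blocks until the lease holder releases or
 * downgrades its lease, or the grace period expires.
 */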
1515
1516/**
1517 *      lease_get_mtime - get the last modified time of an inode
1518 *      @inode: the inode
1519 *      @time:  pointer to a timespec which will contain the last modified time
1520 *
1521 * This is to force NFS clients to flush their caches for files with
1522 * exclusive leases.  The justification is that if someone has an
1523 * exclusive lease, then they could be modifying it.
1524 */
1525void lease_get_mtime(struct inode *inode, struct timespec *time)
1526{
1527        bool has_lease = false;
1528        struct file_lock_context *ctx;
1529        struct file_lock *fl;
1530
1531        ctx = smp_load_acquire(&inode->i_flctx);
1532        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1533                spin_lock(&ctx->flc_lock);
1534                fl = list_first_entry_or_null(&ctx->flc_lease,
1535                                              struct file_lock, fl_list);
1536                if (fl && (fl->fl_type == F_WRLCK))
1537                        has_lease = true;
1538                spin_unlock(&ctx->flc_lock);
1539        }
1540
1541        if (has_lease)
1542                *time = current_fs_time(inode->i_sb);
1543        else
1544                *time = inode->i_mtime;
1545}
1546
1547EXPORT_SYMBOL(lease_get_mtime);
1548
1549/**
1550 *      fcntl_getlease - Enquire what lease is currently active
1551 *      @filp: the file
1552 *
1553 *      The value returned by this function will be one of
1554 *      (if no lease break is pending):
1555 *
1556 *      %F_RDLCK to indicate a shared lease is held.
1557 *
1558 *      %F_WRLCK to indicate an exclusive lease is held.
1559 *
1560 *      %F_UNLCK to indicate no lease is held.
1561 *
1562 *      (if a lease break is pending):
1563 *
1564 *      %F_RDLCK to indicate an exclusive lease needs to be
1565 *              changed to a shared lease (or removed).
1566 *
1567 *      %F_UNLCK to indicate the lease needs to be removed.
1568 *
1569 *      XXX: sfr & willy disagree over whether F_INPROGRESS
1570 *      should be returned to userspace.
1571 */
1572int fcntl_getlease(struct file *filp)
1573{
1574        struct file_lock *fl;
1575        struct inode *inode = file_inode(filp);
1576        struct file_lock_context *ctx;
1577        int type = F_UNLCK;
1578        LIST_HEAD(dispose);
1579
1580        ctx = smp_load_acquire(&inode->i_flctx);
1581        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1582                spin_lock(&ctx->flc_lock);
1583                time_out_leases(file_inode(filp), &dispose);
1584                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1585                        if (fl->fl_file != filp)
1586                                continue;
1587                        type = target_leasetype(fl);
1588                        break;
1589                }
1590                spin_unlock(&ctx->flc_lock);
1591                locks_dispose_list(&dispose);
1592        }
1593        return type;
1594}
1595
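/*
 * Illustrative userspace sketch (not part of this file): the lease type
 * reported by the function above is queried with the F_GETLEASE fcntl:
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	if (type == F_WRLCK)
 *		printf("exclusive lease held\n");
 *
 * F_RDLCK and F_UNLCK indicate a shared lease and no lease respectively.
 */
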
1596/**
1597 * check_conflicting_open - see if the given dentry points to a file that has
1598 *                          an existing open that would conflict with the
1599 *                          desired lease.
1600 * @dentry:     dentry to check
1601 * @arg:        type of lease that we're trying to acquire
1602 * @flags:      current lock flags
1603 *
1604 * Check to see if there's an existing open fd on this file that would
1605 * conflict with the lease we're trying to set.
1606 */
1607static int
1608check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1609{
1610        int ret = 0;
1611        struct inode *inode = dentry->d_inode;
1612
1613        if (flags & FL_LAYOUT)
1614                return 0;
1615
1616        if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1617                return -EAGAIN;
1618
1619        if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1620            (atomic_read(&inode->i_count) > 1)))
1621                ret = -EAGAIN;
1622
1623        return ret;
1624}
1625
1626static int
1627generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1628{
1629        struct file_lock *fl, *my_fl = NULL, *lease;
1630        struct dentry *dentry = filp->f_path.dentry;
1631        struct inode *inode = dentry->d_inode;
1632        struct file_lock_context *ctx;
1633        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1634        int error;
1635        LIST_HEAD(dispose);
1636
1637        lease = *flp;
1638        trace_generic_add_lease(inode, lease);
1639
1640        /* Note that arg is never F_UNLCK here */
1641        ctx = locks_get_lock_context(inode, arg);
1642        if (!ctx)
1643                return -ENOMEM;
1644
1645        /*
1646         * In the delegation case we need mutual exclusion with
1647         * a number of operations that take the i_mutex.  We trylock
1648         * because delegations are an optional optimization, and if
1649         * there's some chance of a conflict, we'd rather not bother;
1650         * that may be a sign this just isn't a good file to hand out
1651         * a delegation on.
1652         */
1653        if (is_deleg && !inode_trylock(inode))
1654                return -EAGAIN;
1655
1656        if (is_deleg && arg == F_WRLCK) {
1657                /* Write delegations are not currently supported: */
1658                inode_unlock(inode);
1659                WARN_ON_ONCE(1);
1660                return -EINVAL;
1661        }
1662
1663        spin_lock(&ctx->flc_lock);
1664        time_out_leases(inode, &dispose);
1665        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1666        if (error)
1667                goto out;
1668
1669        /*
1670         * At this point, we know that if there is an exclusive
1671         * lease on this file, then we hold it on this filp
1672         * (otherwise our open of this file would have blocked).
1673         * And if we are trying to acquire an exclusive lease,
1674         * then the file is not open by anyone (including us)
1675         * except for this filp.
1676         */
1677        error = -EAGAIN;
1678        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1679                if (fl->fl_file == filp &&
1680                    fl->fl_owner == lease->fl_owner) {
1681                        my_fl = fl;
1682                        continue;
1683                }
1684
1685                /*
1686                 * No exclusive leases if someone else has a lease on
1687                 * this file:
1688                 */
1689                if (arg == F_WRLCK)
1690                        goto out;
1691                /*
1692         * Modifying our existing lease is OK, but we cannot get a
1693         * new lease if someone else is opening for write:
1694                 */
1695                if (fl->fl_flags & FL_UNLOCK_PENDING)
1696                        goto out;
1697        }
1698
1699        if (my_fl != NULL) {
1700                lease = my_fl;
1701                error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1702                if (error)
1703                        goto out;
1704                goto out_setup;
1705        }
1706
1707        error = -EINVAL;
1708        if (!leases_enable)
1709                goto out;
1710
1711        locks_insert_lock_ctx(lease, &ctx->flc_lease);
1712        /*
1713         * The check in break_lease() is lockless. It's possible for another
1714         * open to race in after we did the earlier check for a conflicting
1715         * open but before the lease was inserted. Check again for a
1716         * conflicting open and cancel the lease if there is one.
1717         *
1718         * We also add a barrier here to ensure that the insertion of the lock
1719         * precedes these checks.
1720         */
1721        smp_mb();
1722        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1723        if (error) {
1724                locks_unlink_lock_ctx(lease);
1725                goto out;
1726        }
1727
1728out_setup:
1729        if (lease->fl_lmops->lm_setup)
1730                lease->fl_lmops->lm_setup(lease, priv);
1731out:
1732        spin_unlock(&ctx->flc_lock);
1733        locks_dispose_list(&dispose);
1734        if (is_deleg)
1735                inode_unlock(inode);
1736        if (!error && !my_fl)
1737                *flp = NULL;
1738        return error;
1739}
1740
1741static int generic_delete_lease(struct file *filp, void *owner)
1742{
1743        int error = -EAGAIN;
1744        struct file_lock *fl, *victim = NULL;
1745        struct inode *inode = file_inode(filp);
1746        struct file_lock_context *ctx;
1747        LIST_HEAD(dispose);
1748
1749        ctx = smp_load_acquire(&inode->i_flctx);
1750        if (!ctx) {
1751                trace_generic_delete_lease(inode, NULL);
1752                return error;
1753        }
1754
1755        spin_lock(&ctx->flc_lock);
1756        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1757                if (fl->fl_file == filp &&
1758                    fl->fl_owner == owner) {
1759                        victim = fl;
1760                        break;
1761                }
1762        }
1763        trace_generic_delete_lease(inode, victim);
1764        if (victim)
1765                error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1766        spin_unlock(&ctx->flc_lock);
1767        locks_dispose_list(&dispose);
1768        return error;
1769}
1770
1771/**
1772 *      generic_setlease        -       sets a lease on an open file
1773 *      @filp:  file pointer
1774 *      @arg:   type of lease to obtain
1775 *      @flp:   input - file_lock to use, output - file_lock inserted
1776 *      @priv:  private data for lm_setup (may be NULL if lm_setup
1777 *              doesn't require it)
1778 *
1779 *      The (input) flp->fl_lmops->lm_break function is required
1780 *      by break_lease().
1781 */
1782int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1783                        void **priv)
1784{
1785        struct inode *inode = file_inode(filp);
1786        int error;
1787
1788        if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1789                return -EACCES;
1790        if (!S_ISREG(inode->i_mode))
1791                return -EINVAL;
1792        error = security_file_lock(filp, arg);
1793        if (error)
1794                return error;
1795
1796        switch (arg) {
1797        case F_UNLCK:
1798                return generic_delete_lease(filp, *priv);
1799        case F_RDLCK:
1800        case F_WRLCK:
1801                if (!(*flp)->fl_lmops->lm_break) {
1802                        WARN_ON_ONCE(1);
1803                        return -ENOLCK;
1804                }
1805
1806                return generic_add_lease(filp, arg, flp, priv);
1807        default:
1808                return -EINVAL;
1809        }
1810}
1811EXPORT_SYMBOL(generic_setlease);
1812
1813/**
1814 * vfs_setlease        -       sets a lease on an open file
1815 * @filp:       file pointer
1816 * @arg:        type of lease to obtain
1817 * @lease:      file_lock to use when adding a lease
1818 * @priv:       private info for lm_setup when adding a lease (may be
1819 *              NULL if lm_setup doesn't require it)
1820 *
1821 * Call this to establish a lease on the file. The "lease" argument is not
1822 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1823 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
1824 * if not, this function will return -ENOLCK (and generate a scary-looking
1825 * stack trace).
1826 *
1827 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1828 * may be NULL if the lm_setup operation doesn't require it.
1829 */
1830int
1831vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1832{
1833        if (filp->f_op->setlease)
1834                return filp->f_op->setlease(filp, arg, lease, priv);
1835        else
1836                return generic_setlease(filp, arg, lease, priv);
1837}
1838EXPORT_SYMBOL_GPL(vfs_setlease);
1839
1840static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1841{
1842        struct file_lock *fl;
1843        struct fasync_struct *new;
1844        int error;
1845
1846        fl = lease_alloc(filp, arg);
1847        if (IS_ERR(fl))
1848                return PTR_ERR(fl);
1849
1850        new = fasync_alloc();
1851        if (!new) {
1852                locks_free_lock(fl);
1853                return -ENOMEM;
1854        }
1855        new->fa_fd = fd;
1856
1857        error = vfs_setlease(filp, arg, &fl, (void **)&new);
1858        if (fl)
1859                locks_free_lock(fl);
1860        if (new)
1861                fasync_free(new);
1862        return error;
1863}
1864
1865/**
1866 *      fcntl_setlease  -       sets a lease on an open file
1867 *      @fd: open file descriptor
1868 *      @filp: file pointer
1869 *      @arg: type of lease to obtain
1870 *
1871 *      Call this fcntl to establish a lease on the file.
1872 *      Note that you also need to call %F_SETSIG to
1873 *      receive a signal when the lease is broken.
1874 */
1875int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1876{
1877        if (arg == F_UNLCK)
1878                return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1879        return do_fcntl_add_lease(fd, filp, arg);
1880}
1881
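/*
 * Illustrative userspace sketch (not part of this file) of the usual way a
 * process takes a lease and arranges to hear about a lease break; the
 * handle_lease_break() handler and the choice of SIGIO are assumptions:
 *
 *	signal(SIGIO, handle_lease_break);
 *	fcntl(fd, F_SETSIG, SIGIO);		// pick the signal sent on break
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
 *		perror("F_SETLEASE");		// e.g. EAGAIN: file open for write elsewhere
 *
 * After the signal arrives the holder has lease_break_time seconds to
 * downgrade or drop the lease with another F_SETLEASE call.
 */
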
1882/**
1883 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1884 * @inode: inode of the file to apply to
1885 * @fl: The lock to be applied
1886 *
1887 * Apply a FLOCK style lock request to an inode.
1888 */
1889static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1890{
1891        int error;
1892        might_sleep();
1893        for (;;) {
1894                error = flock_lock_inode(inode, fl);
1895                if (error != FILE_LOCK_DEFERRED)
1896                        break;
1897                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1898                if (!error)
1899                        continue;
1900
1901                locks_delete_block(fl);
1902                break;
1903        }
1904        return error;
1905}
1906
1907/**
1908 * locks_lock_inode_wait - Apply a lock to an inode
1909 * @inode: inode of the file to apply to
1910 * @fl: The lock to be applied
1911 *
1912 * Apply a POSIX or FLOCK style lock request to an inode.
1913 */
1914int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1915{
1916        int res = 0;
1917        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1918                case FL_POSIX:
1919                        res = posix_lock_inode_wait(inode, fl);
1920                        break;
1921                case FL_FLOCK:
1922                        res = flock_lock_inode_wait(inode, fl);
1923                        break;
1924                default:
1925                        BUG();
1926        }
1927        return res;
1928}
1929EXPORT_SYMBOL(locks_lock_inode_wait);
1930
1931/**
1932 *      sys_flock - flock() system call.
1933 *      @fd: the file descriptor to lock.
1934 *      @cmd: the type of lock to apply.
1935 *
1936 *      Apply a %FL_FLOCK style lock to an open file descriptor.
1937 *      The @cmd can be one of
1938 *
1939 *      %LOCK_SH -- a shared lock.
1940 *
1941 *      %LOCK_EX -- an exclusive lock.
1942 *
1943 *      %LOCK_UN -- remove an existing lock.
1944 *
1945 *      %LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1946 *
1947 *      %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1948 *      processes read and write access respectively.
1949 */
1950SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1951{
1952        struct fd f = fdget(fd);
1953        struct file_lock *lock;
1954        int can_sleep, unlock;
1955        int error;
1956
1957        error = -EBADF;
1958        if (!f.file)
1959                goto out;
1960
1961        can_sleep = !(cmd & LOCK_NB);
1962        cmd &= ~LOCK_NB;
1963        unlock = (cmd == LOCK_UN);
1964
1965        if (!unlock && !(cmd & LOCK_MAND) &&
1966            !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1967                goto out_putf;
1968
1969        lock = flock_make_lock(f.file, cmd);
1970        if (IS_ERR(lock)) {
1971                error = PTR_ERR(lock);
1972                goto out_putf;
1973        }
1974
1975        if (can_sleep)
1976                lock->fl_flags |= FL_SLEEP;
1977
1978        error = security_file_lock(f.file, lock->fl_type);
1979        if (error)
1980                goto out_free;
1981
1982        if (f.file->f_op->flock)
1983                error = f.file->f_op->flock(f.file,
1984                                          (can_sleep) ? F_SETLKW : F_SETLK,
1985                                          lock);
1986        else
1987                error = locks_lock_file_wait(f.file, lock);
1988
1989 out_free:
1990        locks_free_lock(lock);
1991
1992 out_putf:
1993        fdput(f);
1994 out:
1995        return error;
1996}
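
/*
 * Illustrative userspace sketch (not part of this file) of the syscall
 * implemented above; error handling is omitted:
 *
 *	flock(fd, LOCK_EX);			// blocks until the lock is granted
 *	...					// the open file is now locked
 *	flock(fd, LOCK_UN);			// explicit unlock (final close also releases)
 *
 *	if (flock(fd, LOCK_SH | LOCK_NB) < 0 && errno == EWOULDBLOCK)
 *		fprintf(stderr, "exclusive lock held elsewhere\n");
 */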
1997
1998/**
1999 * vfs_test_lock - test file byte range lock
2000 * @filp: The file to test lock for
2001 * @fl: The lock to test; also used to hold result
2002 *
2003 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2004 * setting conf->fl_type to something other than F_UNLCK.
2005 */
2006int vfs_test_lock(struct file *filp, struct file_lock *fl)
2007{
2008        if (filp->f_op->lock)
2009                return filp->f_op->lock(filp, F_GETLK, fl);
2010        posix_test_lock(filp, fl);
2011        return 0;
2012}
2013EXPORT_SYMBOL_GPL(vfs_test_lock);
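
/*
 * A minimal in-kernel sketch (not from this file) of probing a byte range
 * with vfs_test_lock(); it assumes the caller holds a reference on filp and
 * that, as for classic POSIX locks, its own locks are owned by current->files:
 *
 *	struct file_lock fl;
 *
 *	locks_init_lock(&fl);
 *	fl.fl_flags = FL_POSIX;
 *	fl.fl_owner = current->files;	// don't report our own locks
 *	fl.fl_type = F_WRLCK;		// "would a write lock be blocked?"
 *	fl.fl_start = 0;
 *	fl.fl_end = OFFSET_MAX;
 *	if (!vfs_test_lock(filp, &fl) && fl.fl_type != F_UNLCK) {
 *		// fl now describes a conflicting lock
 *	}
 *	locks_release_private(&fl);	// drop any state copied from the conflict
 */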
2014
2015static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2016{
2017        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
2018#if BITS_PER_LONG == 32
2019        /*
2020         * Make sure we can represent the posix lock via
2021         * legacy 32bit flock.
2022         */
2023        if (fl->fl_start > OFFT_OFFSET_MAX)
2024                return -EOVERFLOW;
2025        if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2026                return -EOVERFLOW;
2027#endif
2028        flock->l_start = fl->fl_start;
2029        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2030                fl->fl_end - fl->fl_start + 1;
2031        flock->l_whence = 0;
2032        flock->l_type = fl->fl_type;
2033        return 0;
2034}
2035
2036#if BITS_PER_LONG == 32
2037static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2038{
2039        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
2040        flock->l_start = fl->fl_start;
2041        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2042                fl->fl_end - fl->fl_start + 1;
2043        flock->l_whence = 0;
2044        flock->l_type = fl->fl_type;
2045}
2046#endif
2047
2048/* Report the first existing lock that would conflict with l.
2049 * This implements the F_GETLK command of fcntl().
2050 */
2051int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
2052{
2053        struct file_lock file_lock;
2054        struct flock flock;
2055        int error;
2056
2057        error = -EFAULT;
2058        if (copy_from_user(&flock, l, sizeof(flock)))
2059                goto out;
2060        error = -EINVAL;
2061        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2062                goto out;
2063
2064        error = flock_to_posix_lock(filp, &file_lock, &flock);
2065        if (error)
2066                goto out;
2067
2068        if (cmd == F_OFD_GETLK) {
2069                error = -EINVAL;
2070                if (flock.l_pid != 0)
2071                        goto out;
2072
2073                cmd = F_GETLK;
2074                file_lock.fl_flags |= FL_OFDLCK;
2075                file_lock.fl_owner = filp;
2076        }
2077
2078        error = vfs_test_lock(filp, &file_lock);
2079        if (error)
2080                goto out;
2081 
2082        flock.l_type = file_lock.fl_type;
2083        if (file_lock.fl_type != F_UNLCK) {
2084                error = posix_lock_to_flock(&flock, &file_lock);
2085                if (error)
2086                        goto rel_priv;
2087        }
2088        error = -EFAULT;
2089        if (!copy_to_user(l, &flock, sizeof(flock)))
2090                error = 0;
2091rel_priv:
2092        locks_release_private(&file_lock);
2093out:
2094        return error;
2095}
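
/*
 * Illustrative userspace sketch (not part of this file) of the F_GETLK
 * command implemented above:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,	// "could this range be write-locked?"
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// 0 means "to end of file"
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		puts("no conflicting lock");
 *	else
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 *
 * With F_OFD_GETLK an OFD-owned blocker is reported with l_pid set to -1,
 * as posix_lock_to_flock() above shows.
 */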
2096
2097/**
2098 * vfs_lock_file - file byte range lock
2099 * @filp: The file to apply the lock to
2100 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2101 * @fl: The lock to be applied
2102 * @conf: Place to return a copy of the conflicting lock, if found.
2103 *
2104 * A caller that doesn't care about the conflicting lock may pass NULL
2105 * as the final argument.
2106 *
2107 * If the filesystem defines a private ->lock() method, then @conf will
2108 * be left unchanged; so a caller that cares should initialize it to
2109 * some acceptable default.
2110 *
2111 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2112 * locks, the ->lock() interface may return asynchronously, before the lock has
2113 * been granted or denied by the underlying filesystem, if (and only if)
2114 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2115 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2116 * the request is for a blocking lock. When ->lock() does return asynchronously,
2117 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2118 * request completes.
2119 * If the request is for a non-blocking lock the filesystem should return
2120 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2121 * with the result. If the request timed out the callback routine will return a
2122 * nonzero return code and the filesystem should release the lock. The
2123 * filesystem is also responsible for keeping a corresponding posix lock when it
2124 * grants a lock so the VFS can find out which locks are locally held and do
2125 * the correct lock cleanup when required.
2126 * The underlying filesystem must not drop the kernel lock or call
2127 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2128 * return code.
2129 */
2130int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2131{
2132        if (filp->f_op->lock)
2133                return filp->f_op->lock(filp, cmd, fl);
2134        else
2135                return posix_lock_file(filp, fl, conf);
2136}
2137EXPORT_SYMBOL_GPL(vfs_lock_file);
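
/*
 * A rough sketch (not from this file) of a filesystem ->lock() method using
 * the deferred protocol described above.  my_fs_queue_lock() is a
 * hypothetical helper that completes the request later and then calls
 * fl->fl_lmops->lm_grant() with the result; F_GETLK handling is omitted:
 *
 *	static int my_fs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
 *			// asynchronous caller such as lockd: queue and defer
 *			my_fs_queue_lock(filp, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		// ordinary caller: just apply the lock locally
 *		return posix_lock_file(filp, fl, NULL);
 *	}
 */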
2138
2139static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2140                             struct file_lock *fl)
2141{
2142        int error;
2143
2144        error = security_file_lock(filp, fl->fl_type);
2145        if (error)
2146                return error;
2147
2148        for (;;) {
2149                error = vfs_lock_file(filp, cmd, fl, NULL);
2150                if (error != FILE_LOCK_DEFERRED)
2151                        break;
2152                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2153                if (!error)
2154                        continue;
2155
2156                locks_delete_block(fl);
2157                break;
2158        }
2159
2160        return error;
2161}
2162
2163/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2164static int
2165check_fmode_for_setlk(struct file_lock *fl)
2166{
2167        switch (fl->fl_type) {
2168        case F_RDLCK:
2169                if (!(fl->fl_file->f_mode & FMODE_READ))
2170                        return -EBADF;
2171                break;
2172        case F_WRLCK:
2173                if (!(fl->fl_file->f_mode & FMODE_WRITE))
2174                        return -EBADF;
2175        }
2176        return 0;
2177}
2178
2179/* Apply the lock described by l to an open file descriptor.
2180 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2181 */
2182int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2183                struct flock __user *l)
2184{
2185        struct file_lock *file_lock = locks_alloc_lock();
2186        struct flock flock;
2187        struct inode *inode;
2188        struct file *f;
2189        int error;
2190
2191        if (file_lock == NULL)
2192                return -ENOLCK;
2193
2194        inode = file_inode(filp);
2195
2196        /*
2197         * This might block, so we do it before checking the inode.
2198         */
2199        error = -EFAULT;
2200        if (copy_from_user(&flock, l, sizeof(flock)))
2201                goto out;
2202
2203        /* Don't allow mandatory locks on files that may be memory mapped
2204         * and shared.
2205         */
2206        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2207                error = -EAGAIN;
2208                goto out;
2209        }
2210
2211        error = flock_to_posix_lock(filp, file_lock, &flock);
2212        if (error)
2213                goto out;
2214
2215        error = check_fmode_for_setlk(file_lock);
2216        if (error)
2217                goto out;
2218
2219        /*
2220         * If the cmd is requesting file-private locks, then set the
2221         * FL_OFDLCK flag and override the owner.
2222         */
2223        switch (cmd) {
2224        case F_OFD_SETLK:
2225                error = -EINVAL;
2226                if (flock.l_pid != 0)
2227                        goto out;
2228
2229                cmd = F_SETLK;
2230                file_lock->fl_flags |= FL_OFDLCK;
2231                file_lock->fl_owner = filp;
2232                break;
2233        case F_OFD_SETLKW:
2234                error = -EINVAL;
2235                if (flock.l_pid != 0)
2236                        goto out;
2237
2238                cmd = F_SETLKW;
2239                file_lock->fl_flags |= FL_OFDLCK;
2240                file_lock->fl_owner = filp;
2241                /* Fallthrough */
2242        case F_SETLKW:
2243                file_lock->fl_flags |= FL_SLEEP;
2244        }
2245
2246        error = do_lock_file_wait(filp, cmd, file_lock);
2247
2248        /*
2249         * Attempt to detect a close/fcntl race and recover by releasing the
2250         * lock that was just acquired. There is no need to do that when we're
2251         * unlocking though, or for OFD locks.
2252         */
2253        if (!error && file_lock->fl_type != F_UNLCK &&
2254            !(file_lock->fl_flags & FL_OFDLCK)) {
2255                /*
2256                 * We need that spin_lock here - it prevents reordering between
2257                 * update of i_flctx->flc_posix and check for it done in
2258                 * close(). rcu_read_lock() wouldn't do.
2259                 */
2260                spin_lock(&current->files->file_lock);
2261                f = fcheck(fd);
2262                spin_unlock(&current->files->file_lock);
2263                if (f != filp) {
2264                        file_lock->fl_type = F_UNLCK;
2265                        error = do_lock_file_wait(filp, cmd, file_lock);
2266                        WARN_ON_ONCE(error);
2267                        error = -EBADF;
2268                }
2269        }
2270out:
2271        trace_fcntl_setlk(inode, file_lock, error);
2272        locks_free_lock(file_lock);
2273        return error;
2274}
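
/*
 * Illustrative userspace sketch (not part of this file) of the F_SETLK and
 * F_SETLKW commands implemented above; error handling is omitted:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 100,		// lock the first 100 bytes
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// F_SETLK fails with EAGAIN instead of blocking
 *	...
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);	// release the range
 *
 * The F_OFD_SETLK/F_OFD_SETLKW variants take the same struct flock but
 * require l_pid to be 0; the resulting lock is owned by the open file
 * description rather than the process.
 */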
2275
2276#if BITS_PER_LONG == 32
2277/* Report the first existing lock that would conflict with l.
2278 * This implements the F_GETLK command of fcntl().
2279 */
2280int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
2281{
2282        struct file_lock file_lock;
2283        struct flock64 flock;
2284        int error;
2285
2286        error = -EFAULT;
2287        if (copy_from_user(&flock, l, sizeof(flock)))
2288                goto out;
2289        error = -EINVAL;
2290        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2291                goto out;
2292
2293        error = flock64_to_posix_lock(filp, &file_lock, &flock);
2294        if (error)
2295                goto out;
2296
2297        if (cmd == F_OFD_GETLK) {
2298                error = -EINVAL;
2299                if (flock.l_pid != 0)
2300                        goto out;
2301
2302                cmd = F_GETLK64;
2303                file_lock.fl_flags |= FL_OFDLCK;
2304                file_lock.fl_owner = filp;
2305        }
2306
2307        error = vfs_test_lock(filp, &file_lock);
2308        if (error)
2309                goto out;
2310
2311        flock.l_type = file_lock.fl_type;
2312        if (file_lock.fl_type != F_UNLCK)
2313                posix_lock_to_flock64(&flock, &file_lock);
2314
2315        error = -EFAULT;
2316        if (!copy_to_user(l, &flock, sizeof(flock)))
2317                error = 0;
2318
2319        locks_release_private(&file_lock);
2320out:
2321        return error;
2322}
2323
2324/* Apply the lock described by l to an open file descriptor.
2325 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2326 */
2327int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2328                struct flock64 __user *l)
2329{
2330        struct file_lock *file_lock = locks_alloc_lock();
2331        struct flock64 flock;
2332        struct inode *inode;
2333        struct file *f;
2334        int error;
2335
2336        if (file_lock == NULL)
2337                return -ENOLCK;
2338
2339        /*
2340         * This might block, so we do it before checking the inode.
2341         */
2342        error = -EFAULT;
2343        if (copy_from_user(&flock, l, sizeof(flock)))
2344                goto out;
2345
2346        inode = file_inode(filp);
2347
2348        /* Don't allow mandatory locks on files that may be memory mapped
2349         * and shared.
2350         */
2351        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2352                error = -EAGAIN;
2353                goto out;
2354        }
2355
2356        error = flock64_to_posix_lock(filp, file_lock, &flock);
2357        if (error)
2358                goto out;
2359
2360        error = check_fmode_for_setlk(file_lock);
2361        if (error)
2362                goto out;
2363
2364        /*
2365         * If the cmd is requesting file-private locks, then set the
2366         * FL_OFDLCK flag and override the owner.
2367         */
2368        switch (cmd) {
2369        case F_OFD_SETLK:
2370                error = -EINVAL;
2371                if (flock.l_pid != 0)
2372                        goto out;
2373
2374                cmd = F_SETLK64;
2375                file_lock->fl_flags |= FL_OFDLCK;
2376                file_lock->fl_owner = filp;
2377                break;
2378        case F_OFD_SETLKW:
2379                error = -EINVAL;
2380                if (flock.l_pid != 0)
2381                        goto out;
2382
2383                cmd = F_SETLKW64;
2384                file_lock->fl_flags |= FL_OFDLCK;
2385                file_lock->fl_owner = filp;
2386                /* Fallthrough */
2387        case F_SETLKW64:
2388                file_lock->fl_flags |= FL_SLEEP;
2389        }
2390
2391        error = do_lock_file_wait(filp, cmd, file_lock);
2392
2393        /*
2394         * Attempt to detect a close/fcntl race and recover by releasing the
2395         * lock that was just acquired. There is no need to do that when we're
2396         * unlocking though, or for OFD locks.
2397         */
2398        if (!error && file_lock->fl_type != F_UNLCK &&
2399            !(file_lock->fl_flags & FL_OFDLCK)) {
2400                /*
2401                 * We need that spin_lock here - it prevents reordering between
2402                 * update of i_flctx->flc_posix and check for it done in
2403                 * close(). rcu_read_lock() wouldn't do.
2404                 */
2405                spin_lock(&current->files->file_lock);
2406                f = fcheck(fd);
2407                spin_unlock(&current->files->file_lock);
2408                if (f != filp) {
2409                        file_lock->fl_type = F_UNLCK;
2410                        error = do_lock_file_wait(filp, cmd, file_lock);
2411                        WARN_ON_ONCE(error);
2412                        error = -EBADF;
2413                }
2414        }
2415out:
2416        locks_free_lock(file_lock);
2417        return error;
2418}
2419#endif /* BITS_PER_LONG == 32 */
2420
2421/*
2422 * This function is called when the file is being removed
2423 * from the task's fd array.  POSIX locks belonging to this task
2424 * are deleted at this time.
2425 */
2426void locks_remove_posix(struct file *filp, fl_owner_t owner)
2427{
2428        int error;
2429        struct file_lock lock;
2430        struct file_lock_context *ctx;
2431
2432        /*
2433         * If there are no locks held on this file, we don't need to call
2434         * posix_lock_file().  Another process could be setting a lock on this
2435         * file at the same time, but we wouldn't remove that lock anyway.
2436         */
2437        ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
2438        if (!ctx || list_empty(&ctx->flc_posix))
2439                return;
2440
2441        lock.fl_type = F_UNLCK;
2442        lock.fl_flags = FL_POSIX | FL_CLOSE;
2443        lock.fl_start = 0;
2444        lock.fl_end = OFFSET_MAX;
2445        lock.fl_owner = owner;
2446        lock.fl_pid = current->tgid;
2447        lock.fl_file = filp;
2448        lock.fl_ops = NULL;
2449        lock.fl_lmops = NULL;
2450
2451        error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2452
2453        if (lock.fl_ops && lock.fl_ops->fl_release_private)
2454                lock.fl_ops->fl_release_private(&lock);
2455        trace_locks_remove_posix(file_inode(filp), &lock, error);
2456}
2457
2458EXPORT_SYMBOL(locks_remove_posix);
2459
2460/* The i_flctx must be valid when calling into here */
2461static void
2462locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2463{
2464        struct file_lock fl = {
2465                .fl_owner = filp,
2466                .fl_pid = current->tgid,
2467                .fl_file = filp,
2468                .fl_flags = FL_FLOCK,
2469                .fl_type = F_UNLCK,
2470                .fl_end = OFFSET_MAX,
2471        };
2472        struct inode *inode = file_inode(filp);
2473
2474        if (list_empty(&flctx->flc_flock))
2475                return;
2476
2477        if (filp->f_op->flock)
2478                filp->f_op->flock(filp, F_SETLKW, &fl);
2479        else
2480                flock_lock_inode(inode, &fl);
2481
2482        if (fl.fl_ops && fl.fl_ops->fl_release_private)
2483                fl.fl_ops->fl_release_private(&fl);
2484}
2485
2486/* The i_flctx must be valid when calling into here */
2487static void
2488locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2489{
2490        struct file_lock *fl, *tmp;
2491        LIST_HEAD(dispose);
2492
2493        if (list_empty(&ctx->flc_lease))
2494                return;
2495
2496        spin_lock(&ctx->flc_lock);
2497        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2498                if (filp == fl->fl_file)
2499                        lease_modify(fl, F_UNLCK, &dispose);
2500        spin_unlock(&ctx->flc_lock);
2501        locks_dispose_list(&dispose);
2502}
2503
2504/*
2505 * This function is called on the last close of an open file.
2506 */
2507void locks_remove_file(struct file *filp)
2508{
2509        struct file_lock_context *ctx;
2510
2511        ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
2512        if (!ctx)
2513                return;
2514
2515        /* remove any OFD locks */
2516        locks_remove_posix(filp, filp);
2517
2518        /* remove flock locks */
2519        locks_remove_flock(filp, ctx);
2520
2521        /* remove any leases */
2522        locks_remove_lease(filp, ctx);
2523}
2524
2525/**
2526 *      posix_unblock_lock - stop waiting for a file lock
2527 *      @waiter: the lock which was waiting
2528 *
2529 *      lockd needs to block waiting for locks.
2530 */
2531int
2532posix_unblock_lock(struct file_lock *waiter)
2533{
2534        int status = 0;
2535
2536        spin_lock(&blocked_lock_lock);
2537        if (waiter->fl_next)
2538                __locks_delete_block(waiter);
2539        else
2540                status = -ENOENT;
2541        spin_unlock(&blocked_lock_lock);
2542        return status;
2543}
2544EXPORT_SYMBOL(posix_unblock_lock);
2545
2546/**
2547 * vfs_cancel_lock - file byte range unblock lock
2548 * @filp: The file to apply the unblock to
2549 * @fl: The lock to be unblocked
2550 *
2551 * Used by lock managers to cancel blocked requests
2552 */
2553int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2554{
2555        if (filp->f_op->lock)
2556                return filp->f_op->lock(filp, F_CANCELLK, fl);
2557        return 0;
2558}
2559
2560EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2561
2562#ifdef CONFIG_PROC_FS
2563#include <linux/proc_fs.h>
2564#include <linux/seq_file.h>
2565
2566struct locks_iterator {
2567        int     li_cpu;
2568        loff_t  li_pos;
2569};
2570
2571static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2572                            loff_t id, char *pfx)
2573{
2574        struct inode *inode = NULL;
2575        unsigned int fl_pid;
2576
2577        if (fl->fl_nspid)
2578                fl_pid = pid_vnr(fl->fl_nspid);
2579        else
2580                fl_pid = fl->fl_pid;
2581
2582        if (fl->fl_file != NULL)
2583                inode = file_inode(fl->fl_file);
2584
2585        seq_printf(f, "%lld:%s ", id, pfx);
2586        if (IS_POSIX(fl)) {
2587                if (fl->fl_flags & FL_ACCESS)
2588                        seq_puts(f, "ACCESS");
2589                else if (IS_OFDLCK(fl))
2590                        seq_puts(f, "OFDLCK");
2591                else
2592                        seq_puts(f, "POSIX ");
2593
2594                seq_printf(f, " %s ",
2595                             (inode == NULL) ? "*NOINODE*" :
2596                             mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2597        } else if (IS_FLOCK(fl)) {
2598                if (fl->fl_type & LOCK_MAND) {
2599                        seq_puts(f, "FLOCK  MSNFS     ");
2600                } else {
2601                        seq_puts(f, "FLOCK  ADVISORY  ");
2602                }
2603        } else if (IS_LEASE(fl)) {
2604                if (fl->fl_flags & FL_DELEG)
2605                        seq_puts(f, "DELEG  ");
2606                else
2607                        seq_puts(f, "LEASE  ");
2608
2609                if (lease_breaking(fl))
2610                        seq_puts(f, "BREAKING  ");
2611                else if (fl->fl_file)
2612                        seq_puts(f, "ACTIVE    ");
2613                else
2614                        seq_puts(f, "BREAKER   ");
2615        } else {
2616                seq_puts(f, "UNKNOWN UNKNOWN  ");
2617        }
2618        if (fl->fl_type & LOCK_MAND) {
2619                seq_printf(f, "%s ",
2620                               (fl->fl_type & LOCK_READ)
2621                               ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2622                               : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2623        } else {
2624                seq_printf(f, "%s ",
2625                               (lease_breaking(fl))
2626                               ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2627                               : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2628        }
2629        if (inode) {
2630                /* userspace relies on this representation of dev_t */
2631                seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2632                                MAJOR(inode->i_sb->s_dev),
2633                                MINOR(inode->i_sb->s_dev), inode->i_ino);
2634        } else {
2635                seq_printf(f, "%d <none>:0 ", fl_pid);
2636        }
2637        if (IS_POSIX(fl)) {
2638                if (fl->fl_end == OFFSET_MAX)
2639                        seq_printf(f, "%Ld EOF\n", fl->fl_start);
2640                else
2641                        seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2642        } else {
2643                seq_puts(f, "0 EOF\n");
2644        }
2645}
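
/*
 * For illustration only (the exact output is produced by the code above),
 * a typical /proc/locks line looks like:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 *
 * i.e. id, lock class and mode, the holder's pid, the major:minor device
 * numbers and inode number, and the locked byte range.
 */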
2646
2647static int locks_show(struct seq_file *f, void *v)
2648{
2649        struct locks_iterator *iter = f->private;
2650        struct file_lock *fl, *bfl;
2651
2652        fl = hlist_entry(v, struct file_lock, fl_link);
2653
2654        lock_get_status(f, fl, iter->li_pos, "");
2655
2656        list_for_each_entry(bfl, &fl->fl_block, fl_block)
2657                lock_get_status(f, bfl, iter->li_pos, " ->");
2658
2659        return 0;
2660}
2661
2662static void __show_fd_locks(struct seq_file *f,
2663                        struct list_head *head, int *id,
2664                        struct file *filp, struct files_struct *files)
2665{
2666        struct file_lock *fl;
2667
2668        list_for_each_entry(fl, head, fl_list) {
2669
2670                if (filp != fl->fl_file)
2671                        continue;
2672                if (fl->fl_owner != files &&
2673                    fl->fl_owner != filp)
2674                        continue;
2675
2676                (*id)++;
2677                seq_puts(f, "lock:\t");
2678                lock_get_status(f, fl, *id, "");
2679        }
2680}
2681
2682void show_fd_locks(struct seq_file *f,
2683                  struct file *filp, struct files_struct *files)
2684{
2685        struct inode *inode = file_inode(filp);
2686        struct file_lock_context *ctx;
2687        int id = 0;
2688
2689        ctx = smp_load_acquire(&inode->i_flctx);
2690        if (!ctx)
2691                return;
2692
2693        spin_lock(&ctx->flc_lock);
2694        __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2695        __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2696        __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2697        spin_unlock(&ctx->flc_lock);
2698}
2699
2700static void *locks_start(struct seq_file *f, loff_t *pos)
2701        __acquires(&blocked_lock_lock)
2702{
2703        struct locks_iterator *iter = f->private;
2704
2705        iter->li_pos = *pos + 1;
2706        lg_global_lock(&file_lock_lglock);
2707        spin_lock(&blocked_lock_lock);
2708        return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
2709}
2710
2711static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2712{
2713        struct locks_iterator *iter = f->private;
2714
2715        ++iter->li_pos;
2716        return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
2717}
2718
2719static void locks_stop(struct seq_file *f, void *v)
2720        __releases(&blocked_lock_lock)
2721{
2722        spin_unlock(&blocked_lock_lock);
2723        lg_global_unlock(&file_lock_lglock);
2724}
2725
2726static const struct seq_operations locks_seq_operations = {
2727        .start  = locks_start,
2728        .next   = locks_next,
2729        .stop   = locks_stop,
2730        .show   = locks_show,
2731};
2732
2733static int locks_open(struct inode *inode, struct file *filp)
2734{
2735        return seq_open_private(filp, &locks_seq_operations,
2736                                        sizeof(struct locks_iterator));
2737}
2738
2739static const struct file_operations proc_locks_operations = {
2740        .open           = locks_open,
2741        .read           = seq_read,
2742        .llseek         = seq_lseek,
2743        .release        = seq_release_private,
2744};
2745
2746static int __init proc_locks_init(void)
2747{
2748        proc_create("locks", 0, NULL, &proc_locks_operations);
2749        return 0;
2750}
2751fs_initcall(proc_locks_init);
2752#endif
2753
2754static int __init filelock_init(void)
2755{
2756        int i;
2757
2758        flctx_cache = kmem_cache_create("file_lock_ctx",
2759                        sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2760
2761        filelock_cache = kmem_cache_create("file_lock_cache",
2762                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2763
2764        lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2765
2766        for_each_possible_cpu(i)
2767                INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2768
2769        return 0;
2770}
2771
2772core_initcall(filelock_init);
2773