/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *      - mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

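/*
 * For orientation, a minimal userspace sketch (not part of this file)
 * contrasting the two lock personalities described above. The path and
 * error handling are illustrative only:
 *
 *	int fd = open("/tmp/example", O_RDWR);	// hypothetical file
 *
 *	// FL_FLOCK: whole-file lock tied to the open file description
 *	flock(fd, LOCK_EX);
 *
 *	// FL_POSIX: byte-range lock owned by the process
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// 0 means "through EOF"
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */
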
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)

static inline bool is_remote_lock(struct file *filp)
{
	return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
}

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

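/* Tunables; exposed via sysctl as fs.leases-enable and fs.lease-break-time. */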
int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the flc_lock and the blocked_lock_lock (acquired in that order).
 * Deleting an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

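/*
 * Find or lazily allocate the lock context for @inode. For F_UNLCK requests
 * nothing is allocated: if no context exists, there is nothing to unlock.
 * The cmpxchg() below resolves races with a concurrent allocator.
 */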
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}

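/*
 * Debugging aids: if an inode is torn down with locks still attached, warn
 * and dump whatever remains on its context lists.
 */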
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

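/* Free every lock on @dispose; called after the spinlocks have been dropped. */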
static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}

EXPORT_SYMBOL(locks_copy_lock);

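/*
 * Translate a flock() command (LOCK_SH/LOCK_EX/LOCK_UN, possibly combined
 * with LOCK_MAND) into the corresponding F_* lock type, or -EINVAL.
 */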
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

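/* Validate @type and store it in the lock; only F_RDLCK, F_WRLCK and F_UNLCK are accepted. */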
static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

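/* Verify a "struct flock64" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */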
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 * that the flc_lock is also held on insertions we can avoid taking the
 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the fl_block
	 * list does not require the flc_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}

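/*
 * Attach a lock record to the inode's context list and publish it on the
 * global per-CPU list for /proc/locks. Callers hold the flc_lock.
 */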
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	fl->fl_nspid = get_pid(task_tgid(current));
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

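/*
 * Test for a POSIX lock that would conflict with @fl. If one is found, @fl is
 * overwritten with a description of the conflicting lock; otherwise
 * fl->fl_type is set to F_UNLCK.
 */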
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			if (cfl->fl_nspid)
				fl->fl_pid = pid_vnr(cfl->fl_nspid);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turn may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

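/*
 * Example of the cycle being detected: task A holds lock L1 and is waiting
 * for L2, while task B holds L2 and now requests L1. Walking the chain of
 * waiters from B's blocker eventually reaches a lock owned by B itself, so
 * the request fails with -EDEADLK instead of being put to sleep.
 */
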
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}

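/*
 * Core of POSIX byte-range locking: check the request against existing locks,
 * then merge, split or replace overlapping locks with the same owner. Up to
 * two spare file_lock structures are preallocated, since splitting an
 * existing lock can require both.
 */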
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's still safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @start:      first byte in the file to check
 * @end:        last byte in the file to check
 * @type:       %F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */

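/* Clear pending-break state once a lease has reached its target type. */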
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

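/* Downgrade or remove any leases whose break deadline has passed. */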
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

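/*
 * Decide whether @breaker conflicts with @lease: a layout-breaking request
 * only conflicts with layout leases (and vice versa), a delegation-only
 * breaker (FL_DELEG) leaves ordinary FL_LEASE leases alone, and otherwise
 * the usual shared/exclusive rules apply.
 */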
1405static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1406{
1407        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1408                return false;
1409        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1410                return false;
1411        return locks_conflict(breaker, lease);
1412}
1413
1414static bool
1415any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1416{
1417        struct file_lock_context *ctx = inode->i_flctx;
1418        struct file_lock *fl;
1419
1420        lockdep_assert_held(&ctx->flc_lock);
1421
1422        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1423                if (leases_conflict(fl, breaker))
1424                        return true;
1425        }
1426        return false;
1427}
1428
1429/**
1430 *      __break_lease   -       revoke all outstanding leases on file
1431 *      @inode: the inode of the file to return
1432 *      @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1433 *          break all leases
1434 *      @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1435 *          only delegations
1436 *
1437 *      break_lease (inlined for speed) has checked there already is at least
1438 *      some kind of lock (maybe a lease) on this file.  Leases are broken on
1439 *      a call to open() or truncate().  This function can sleep unless you
1440 *      specified %O_NONBLOCK to your open().
1441 */
1442int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1443{
1444        int error = 0;
1445        struct file_lock_context *ctx;
1446        struct file_lock *new_fl, *fl, *tmp;
1447        unsigned long break_time;
1448        int want_write = (mode & O_ACCMODE) != O_RDONLY;
1449        LIST_HEAD(dispose);
1450
1451        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1452        if (IS_ERR(new_fl))
1453                return PTR_ERR(new_fl);
1454        new_fl->fl_flags = type;
1455
1456        /* typically we will check that ctx is non-NULL before calling */
1457        ctx = smp_load_acquire(&inode->i_flctx);
1458        if (!ctx) {
1459                WARN_ON_ONCE(1);
1460                return error;
1461        }
1462
1463        percpu_down_read_preempt_disable(&file_rwsem);
1464        spin_lock(&ctx->flc_lock);
1465
1466        time_out_leases(inode, &dispose);
1467
1468        if (!any_leases_conflict(inode, new_fl))
1469                goto out;
1470
1471        break_time = 0;
1472        if (lease_break_time > 0) {
1473                break_time = jiffies + lease_break_time * HZ;
1474                if (break_time == 0)
1475                        break_time++;   /* so that 0 means no break time */
1476        }
1477
1478        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1479                if (!leases_conflict(fl, new_fl))
1480                        continue;
1481                if (want_write) {
1482                        if (fl->fl_flags & FL_UNLOCK_PENDING)
1483                                continue;
1484                        fl->fl_flags |= FL_UNLOCK_PENDING;
1485                        fl->fl_break_time = break_time;
1486                } else {
1487                        if (lease_breaking(fl))
1488                                continue;
1489                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
1490                        fl->fl_downgrade_time = break_time;
1491                }
1492                if (fl->fl_lmops->lm_break(fl))
1493                        locks_delete_lock_ctx(fl, &dispose);
1494        }
1495
1496        if (list_empty(&ctx->flc_lease))
1497                goto out;
1498
1499        if (mode & O_NONBLOCK) {
1500                trace_break_lease_noblock(inode, new_fl);
1501                error = -EWOULDBLOCK;
1502                goto out;
1503        }
1504
1505restart:
1506        fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1507        break_time = fl->fl_break_time;
1508        if (break_time != 0)
1509                break_time -= jiffies;
1510        if (break_time == 0)
1511                break_time++;
1512        locks_insert_block(fl, new_fl);
1513        trace_break_lease_block(inode, new_fl);
1514        spin_unlock(&ctx->flc_lock);
1515        percpu_up_read_preempt_enable(&file_rwsem);
1516
1517        locks_dispose_list(&dispose);
1518        error = wait_event_interruptible_timeout(new_fl->fl_wait,
1519                                                !new_fl->fl_next, break_time);
1520
1521        percpu_down_read_preempt_disable(&file_rwsem);
1522        spin_lock(&ctx->flc_lock);
1523        trace_break_lease_unblock(inode, new_fl);
1524        locks_delete_block(new_fl);
1525        if (error >= 0) {
1526                /*
1527                 * Wait for the next conflicting lease that has not been
1528                 * broken yet
1529                 */
1530                if (error == 0)
1531                        time_out_leases(inode, &dispose);
1532                if (any_leases_conflict(inode, new_fl))
1533                        goto restart;
1534                error = 0;
1535        }
1536out:
1537        spin_unlock(&ctx->flc_lock);
1538        percpu_up_read_preempt_enable(&file_rwsem);
1539        locks_dispose_list(&dispose);
1540        locks_free_lock(new_fl);
1541        return error;
1542}
1543
1544EXPORT_SYMBOL(__break_lease);
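
    /*
     * Illustrative userspace view of the above (a sketch, not part of this
     * file): while another process holds a write lease on the file, a
     * non-blocking open fails instead of waiting for the lease break to
     * complete:
     *
     *        int fd = open("somefile", O_RDWR | O_NONBLOCK);
     *        if (fd == -1 && errno == EWOULDBLOCK)
     *                fprintf(stderr, "lease break in progress, retry later\n");
     */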
1545
1546/**
1547 *      lease_get_mtime - get the last modified time of an inode
1548 *      @inode: the inode
1549 *      @time:  pointer to a timespec which will contain the last modified time
1550 *
1551 * This is to force NFS clients to flush their caches for files with
1552 * exclusive leases.  The justification is that if someone has an
1553 * exclusive lease, then they could be modifying it.
1554 */
1555void lease_get_mtime(struct inode *inode, struct timespec *time)
1556{
1557        bool has_lease = false;
1558        struct file_lock_context *ctx;
1559        struct file_lock *fl;
1560
1561        ctx = smp_load_acquire(&inode->i_flctx);
1562        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1563                spin_lock(&ctx->flc_lock);
1564                fl = list_first_entry_or_null(&ctx->flc_lease,
1565                                              struct file_lock, fl_list);
1566                if (fl && (fl->fl_type == F_WRLCK))
1567                        has_lease = true;
1568                spin_unlock(&ctx->flc_lock);
1569        }
1570
1571        if (has_lease)
1572                *time = current_time(inode);
1573        else
1574                *time = inode->i_mtime;
1575}
1576
1577EXPORT_SYMBOL(lease_get_mtime);
1578
1579/**
1580 *      fcntl_getlease - Enquire what lease is currently active
1581 *      @filp: the file
1582 *
1583 *      The value returned by this function will be one of
1584 *      (if no lease break is pending):
1585 *
1586 *      %F_RDLCK to indicate a shared lease is held.
1587 *
1588 *      %F_WRLCK to indicate an exclusive lease is held.
1589 *
1590 *      %F_UNLCK to indicate no lease is held.
1591 *
1592 *      (if a lease break is pending):
1593 *
1594 *      %F_RDLCK to indicate an exclusive lease needs to be
1595 *              changed to a shared lease (or removed).
1596 *
1597 *      %F_UNLCK to indicate the lease needs to be removed.
1598 *
1599 *      XXX: sfr & willy disagree over whether F_INPROGRESS
1600 *      should be returned to userspace.
1601 */
1602int fcntl_getlease(struct file *filp)
1603{
1604        struct file_lock *fl;
1605        struct inode *inode = locks_inode(filp);
1606        struct file_lock_context *ctx;
1607        int type = F_UNLCK;
1608        LIST_HEAD(dispose);
1609
1610        ctx = smp_load_acquire(&inode->i_flctx);
1611        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1612                percpu_down_read_preempt_disable(&file_rwsem);
1613                spin_lock(&ctx->flc_lock);
1614                time_out_leases(inode, &dispose);
1615                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1616                        if (fl->fl_file != filp)
1617                                continue;
1618                        type = target_leasetype(fl);
1619                        break;
1620                }
1621                spin_unlock(&ctx->flc_lock);
1622                percpu_up_read_preempt_enable(&file_rwsem);
1623
1624                locks_dispose_list(&dispose);
1625        }
1626        return type;
1627}
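
    /*
     * Userspace sketch (illustrative, not part of this file): the state
     * reported by fcntl_getlease() is queried with the F_GETLEASE fcntl,
     * which needs _GNU_SOURCE:
     *
     *        int type = fcntl(fd, F_GETLEASE);
     *        if (type == F_WRLCK)
     *                printf("exclusive lease held\n");
     *        else if (type == F_RDLCK)
     *                printf("shared lease held\n");
     */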
1628
1629/**
1630 * check_conflicting_open - see if the given dentry points to a file that has
1631 *                          an existing open that would conflict with the
1632 *                          desired lease.
1633 * @dentry:     dentry to check
1634 * @arg:        type of lease that we're trying to acquire
1635 * @flags:      current lock flags
1636 *
1637 * Check to see if there's an existing open fd on this file that would
1638 * conflict with the lease we're trying to set.
1639 */
1640static int
1641check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1642{
1643        int ret = 0;
1644        struct inode *inode = dentry->d_inode;
1645
1646        if (flags & FL_LAYOUT)
1647                return 0;
1648
1649        if ((arg == F_RDLCK) &&
1650            (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
1651                return -EAGAIN;
1652
1653        if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1654            (atomic_read(&inode->i_count) > 1)))
1655                ret = -EAGAIN;
1656
1657        return ret;
1658}
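
    /*
     * For example, a request for an F_WRLCK lease fails above with -EAGAIN
     * whenever the dentry or inode has any other reference, and a request
     * for an F_RDLCK lease fails while someone holds the file open for
     * writing.
     */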
1659
1660static int
1661generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1662{
1663        struct file_lock *fl, *my_fl = NULL, *lease;
1664        struct dentry *dentry = filp->f_path.dentry;
1665        struct inode *inode = dentry->d_inode;
1666        struct file_lock_context *ctx;
1667        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1668        int error;
1669        LIST_HEAD(dispose);
1670
1671        lease = *flp;
1672        trace_generic_add_lease(inode, lease);
1673
1674        /* Note that arg is never F_UNLCK here */
1675        ctx = locks_get_lock_context(inode, arg);
1676        if (!ctx)
1677                return -ENOMEM;
1678
1679        /*
1680         * In the delegation case we need mutual exclusion with
1681         * a number of operations that take the inode lock.  We trylock
1682         * because delegations are an optional optimization: if there is
1683         * any chance of a conflict we'd rather not bother, since that may
1684         * be a sign this just isn't a good file to hand out a delegation
1685         * on.
1686         */
1687        if (is_deleg && !inode_trylock(inode))
1688                return -EAGAIN;
1689
1690        if (is_deleg && arg == F_WRLCK) {
1691                /* Write delegations are not currently supported: */
1692                inode_unlock(inode);
1693                WARN_ON_ONCE(1);
1694                return -EINVAL;
1695        }
1696
1697        percpu_down_read_preempt_disable(&file_rwsem);
1698        spin_lock(&ctx->flc_lock);
1699        time_out_leases(inode, &dispose);
1700        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1701        if (error)
1702                goto out;
1703
1704        /*
1705         * At this point, we know that if there is an exclusive
1706         * lease on this file, then we hold it on this filp
1707         * (otherwise our open of this file would have blocked).
1708         * And if we are trying to acquire an exclusive lease,
1709         * then the file is not open by anyone (including us)
1710         * except for this filp.
1711         */
1712        error = -EAGAIN;
1713        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1714                if (fl->fl_file == filp &&
1715                    fl->fl_owner == lease->fl_owner) {
1716                        my_fl = fl;
1717                        continue;
1718                }
1719
1720                /*
1721                 * No exclusive leases if someone else has a lease on
1722                 * this file:
1723                 */
1724                if (arg == F_WRLCK)
1725                        goto out;
1726                /*
1727                 * Modifying our existing lease is OK, but no getting a
1728         * Modifying our existing lease is OK, but we can't grant a
1729         * new lease if someone else is opening for write:
1730                if (fl->fl_flags & FL_UNLOCK_PENDING)
1731                        goto out;
1732        }
1733
1734        if (my_fl != NULL) {
1735                lease = my_fl;
1736                error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1737                if (error)
1738                        goto out;
1739                goto out_setup;
1740        }
1741
1742        error = -EINVAL;
1743        if (!leases_enable)
1744                goto out;
1745
1746        locks_insert_lock_ctx(lease, &ctx->flc_lease);
1747        /*
1748         * The check in break_lease() is lockless. It's possible for another
1749         * open to race in after we did the earlier check for a conflicting
1750         * open but before the lease was inserted. Check again for a
1751         * conflicting open and cancel the lease if there is one.
1752         *
1753         * We also add a barrier here to ensure that the insertion of the lock
1754         * precedes these checks.
1755         */
1756        smp_mb();
1757        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1758        if (error) {
1759                locks_unlink_lock_ctx(lease);
1760                goto out;
1761        }
1762
1763out_setup:
1764        if (lease->fl_lmops->lm_setup)
1765                lease->fl_lmops->lm_setup(lease, priv);
1766out:
1767        spin_unlock(&ctx->flc_lock);
1768        percpu_up_read_preempt_enable(&file_rwsem);
1769        locks_dispose_list(&dispose);
1770        if (is_deleg)
1771                inode_unlock(inode);
1772        if (!error && !my_fl)
1773                *flp = NULL;
1774        return error;
1775}
1776
1777static int generic_delete_lease(struct file *filp, void *owner)
1778{
1779        int error = -EAGAIN;
1780        struct file_lock *fl, *victim = NULL;
1781        struct inode *inode = locks_inode(filp);
1782        struct file_lock_context *ctx;
1783        LIST_HEAD(dispose);
1784
1785        ctx = smp_load_acquire(&inode->i_flctx);
1786        if (!ctx) {
1787                trace_generic_delete_lease(inode, NULL);
1788                return error;
1789        }
1790
1791        percpu_down_read_preempt_disable(&file_rwsem);
1792        spin_lock(&ctx->flc_lock);
1793        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1794                if (fl->fl_file == filp &&
1795                    fl->fl_owner == owner) {
1796                        victim = fl;
1797                        break;
1798                }
1799        }
1800        trace_generic_delete_lease(inode, victim);
1801        if (victim)
1802                error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1803        spin_unlock(&ctx->flc_lock);
1804        percpu_up_read_preempt_enable(&file_rwsem);
1805        locks_dispose_list(&dispose);
1806        return error;
1807}
1808
1809/**
1810 *      generic_setlease        -       sets a lease on an open file
1811 *      @filp:  file pointer
1812 *      @arg:   type of lease to obtain
1813 *      @flp:   input - file_lock to use, output - file_lock inserted
1814 *      @priv:  private data for lm_setup (may be NULL if lm_setup
1815 *              doesn't require it)
1816 *
1817 *      The (input) flp->fl_lmops->lm_break function is required
1818 *      by break_lease().
1819 */
1820int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1821                        void **priv)
1822{
1823        struct inode *inode = locks_inode(filp);
1824        int error;
1825
1826        if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1827                return -EACCES;
1828        if (!S_ISREG(inode->i_mode))
1829                return -EINVAL;
1830        error = security_file_lock(filp, arg);
1831        if (error)
1832                return error;
1833
1834        switch (arg) {
1835        case F_UNLCK:
1836                return generic_delete_lease(filp, *priv);
1837        case F_RDLCK:
1838        case F_WRLCK:
1839                if (!(*flp)->fl_lmops->lm_break) {
1840                        WARN_ON_ONCE(1);
1841                        return -ENOLCK;
1842                }
1843
1844                return generic_add_lease(filp, arg, flp, priv);
1845        default:
1846                return -EINVAL;
1847        }
1848}
1849EXPORT_SYMBOL(generic_setlease);
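
    /*
     * Sketch of a hypothetical filesystem hooking ->setlease (myfs_setlease
     * and myfs_sync_remote_opens are illustrative names, not an existing
     * driver): revoke conflicting remote state first, then fall back to the
     * generic implementation for the local bookkeeping:
     *
     *        static int myfs_setlease(struct file *filp, long arg,
     *                                 struct file_lock **flp, void **priv)
     *        {
     *                int error = myfs_sync_remote_opens(filp);
     *
     *                if (error)
     *                        return error;
     *                return generic_setlease(filp, arg, flp, priv);
     *        }
     */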
1850
1851/**
1852 * vfs_setlease        -       sets a lease on an open file
1853 * @filp:       file pointer
1854 * @arg:        type of lease to obtain
1855 * @lease:      file_lock to use when adding a lease
1856 * @priv:       private info for lm_setup when adding a lease (may be
1857 *              NULL if lm_setup doesn't require it)
1858 *
1859 * Call this to establish a lease on the file. The "lease" argument is not
1860 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1861 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
1862 * if not, this function will return -ENOLCK (and generate a scary-looking
1863 * stack trace).
1864 *
1865 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1866 * may be NULL if the lm_setup operation doesn't require it.
1867 */
1868int
1869vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1870{
1871        if (filp->f_op->setlease && is_remote_lock(filp))
1872                return filp->f_op->setlease(filp, arg, lease, priv);
1873        else
1874                return generic_setlease(filp, arg, lease, priv);
1875}
1876EXPORT_SYMBOL_GPL(vfs_setlease);
1877
1878static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1879{
1880        struct file_lock *fl;
1881        struct fasync_struct *new;
1882        int error;
1883
1884        fl = lease_alloc(filp, arg);
1885        if (IS_ERR(fl))
1886                return PTR_ERR(fl);
1887
1888        new = fasync_alloc();
1889        if (!new) {
1890                locks_free_lock(fl);
1891                return -ENOMEM;
1892        }
1893        new->fa_fd = fd;
1894
1895        error = vfs_setlease(filp, arg, &fl, (void **)&new);
1896        if (fl)
1897                locks_free_lock(fl);
1898        if (new)
1899                fasync_free(new);
1900        return error;
1901}
1902
1903/**
1904 *      fcntl_setlease  -       sets a lease on an open file
1905 *      @fd: open file descriptor
1906 *      @filp: file pointer
1907 *      @arg: type of lease to obtain
1908 *
1909 *      Call this fcntl to establish a lease on the file.
1910 *      Note that you also need to call %F_SETSIG to
1911 *      receive a signal when the lease is broken.
1912 */
1913int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1914{
1915        if (arg == F_UNLCK)
1916                return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1917        return do_fcntl_add_lease(fd, filp, arg);
1918}
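
    /*
     * Userspace sketch (illustrative): take a read lease and, as the comment
     * above suggests, request a real-time signal that identifies the
     * descriptor when the lease is broken (both fcntls need _GNU_SOURCE):
     *
     *        fcntl(fd, F_SETSIG, SIGRTMIN);
     *        if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
     *                perror("F_SETLEASE");
     */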
1919
1920/**
1921 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1922 * @inode: inode of the file to apply to
1923 * @fl: The lock to be applied
1924 *
1925 * Apply a FLOCK style lock request to an inode.
1926 */
1927static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1928{
1929        int error;
1930        might_sleep();
1931        for (;;) {
1932                error = flock_lock_inode(inode, fl);
1933                if (error != FILE_LOCK_DEFERRED)
1934                        break;
1935                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1936                if (!error)
1937                        continue;
1938
1939                locks_delete_block(fl);
1940                break;
1941        }
1942        return error;
1943}
1944
1945/**
1946 * locks_lock_inode_wait - Apply a lock to an inode
1947 * @inode: inode of the file to apply to
1948 * @fl: The lock to be applied
1949 *
1950 * Apply a POSIX or FLOCK style lock request to an inode.
1951 */
1952int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1953{
1954        int res = 0;
1955        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1956        case FL_POSIX:
1957                res = posix_lock_inode_wait(inode, fl);
1958                break;
1959        case FL_FLOCK:
1960                res = flock_lock_inode_wait(inode, fl);
1961                break;
1962        default:
1963                BUG();
1964        }
1965        return res;
1966}
1967EXPORT_SYMBOL(locks_lock_inode_wait);
1968
1969/**
1970 *      sys_flock - flock() system call.
1971 *      @fd: the file descriptor to lock.
1972 *      @cmd: the type of lock to apply.
1973 *
1974 *      Apply a %FL_FLOCK style lock to an open file descriptor.
1975 *      The @cmd can be one of
1976 *
1977 *      %LOCK_SH -- a shared lock.
1978 *
1979 *      %LOCK_EX -- an exclusive lock.
1980 *
1981 *      %LOCK_UN -- remove an existing lock.
1982 *
1983 *      %LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1984 *
1985 *      %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1986 *      processes read and write access respectively.
1987 */
1988SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1989{
1990        struct fd f = fdget(fd);
1991        struct file_lock *lock;
1992        int can_sleep, unlock;
1993        int error;
1994
1995        error = -EBADF;
1996        if (!f.file)
1997                goto out;
1998
1999        can_sleep = !(cmd & LOCK_NB);
2000        cmd &= ~LOCK_NB;
2001        unlock = (cmd == LOCK_UN);
2002
2003        if (!unlock && !(cmd & LOCK_MAND) &&
2004            !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2005                goto out_putf;
2006
2007        lock = flock_make_lock(f.file, cmd);
2008        if (IS_ERR(lock)) {
2009                error = PTR_ERR(lock);
2010                goto out_putf;
2011        }
2012
2013        if (can_sleep)
2014                lock->fl_flags |= FL_SLEEP;
2015
2016        error = security_file_lock(f.file, lock->fl_type);
2017        if (error)
2018                goto out_free;
2019
2020        if (f.file->f_op->flock && is_remote_lock(f.file))
2021                error = f.file->f_op->flock(f.file,
2022                                          (can_sleep) ? F_SETLKW : F_SETLK,
2023                                          lock);
2024        else
2025                error = locks_lock_file_wait(f.file, lock);
2026
2027 out_free:
2028        locks_free_lock(lock);
2029
2030 out_putf:
2031        fdput(f);
2032 out:
2033        return error;
2034}
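
    /*
     * Userspace sketch (illustrative): use the syscall above to ensure a
     * single running instance of a program, failing fast rather than
     * sleeping:
     *
     *        int fd = open("/tmp/myprog.lock", O_RDONLY | O_CREAT, 0600);
     *        if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
     *                fprintf(stderr, "another instance holds the lock\n");
     */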
2035
2036/**
2037 * vfs_test_lock - test file byte range lock
2038 * @filp: The file to test lock for
2039 * @fl: The lock to test; also used to hold result
2040 *
2041 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2042 * setting fl->fl_type to something other than F_UNLCK.
2043 */
2044int vfs_test_lock(struct file *filp, struct file_lock *fl)
2045{
2046        if (filp->f_op->lock && is_remote_lock(filp))
2047                return filp->f_op->lock(filp, F_GETLK, fl);
2048        posix_test_lock(filp, fl);
2049        return 0;
2050}
2051EXPORT_SYMBOL_GPL(vfs_test_lock);
2052
2053static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2054{
2055        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
2056#if BITS_PER_LONG == 32
2057        /*
2058         * Make sure we can represent the posix lock via
2059         * legacy 32bit flock.
2060         */
2061        if (fl->fl_start > OFFT_OFFSET_MAX)
2062                return -EOVERFLOW;
2063        if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2064                return -EOVERFLOW;
2065#endif
2066        flock->l_start = fl->fl_start;
2067        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2068                fl->fl_end - fl->fl_start + 1;
2069        flock->l_whence = 0;
2070        flock->l_type = fl->fl_type;
2071        return 0;
2072}
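
    /*
     * For example, an internal lock on bytes 100 through 199 inclusive
     * (fl_start == 100, fl_end == 199) converts to l_start == 100 and
     * l_len == 199 - 100 + 1 == 100, while a lock running to EOF
     * (fl_end == OFFSET_MAX) converts to the traditional l_len == 0.
     */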
2073
2074#if BITS_PER_LONG == 32
2075static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2076{
2077        flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
2078        flock->l_start = fl->fl_start;
2079        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2080                fl->fl_end - fl->fl_start + 1;
2081        flock->l_whence = 0;
2082        flock->l_type = fl->fl_type;
2083}
2084#endif
2085
2086/* Report the first existing lock that would conflict with l.
2087 * This implements the F_GETLK command of fcntl().
2088 */
2089int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
2090{
2091        struct file_lock file_lock;
2092        struct flock flock;
2093        int error;
2094
2095        error = -EFAULT;
2096        if (copy_from_user(&flock, l, sizeof(flock)))
2097                goto out;
2098        error = -EINVAL;
2099        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2100                goto out;
2101
2102        error = flock_to_posix_lock(filp, &file_lock, &flock);
2103        if (error)
2104                goto out;
2105
2106        if (cmd == F_OFD_GETLK) {
2107                error = -EINVAL;
2108                if (flock.l_pid != 0)
2109                        goto out;
2110
2111                cmd = F_GETLK;
2112                file_lock.fl_flags |= FL_OFDLCK;
2113                file_lock.fl_owner = filp;
2114        }
2115
2116        error = vfs_test_lock(filp, &file_lock);
2117        if (error)
2118                goto out;
2119
2120        flock.l_type = file_lock.fl_type;
2121        if (file_lock.fl_type != F_UNLCK) {
2122                error = posix_lock_to_flock(&flock, &file_lock);
2123                if (error)
2124                        goto rel_priv;
2125        }
2126        error = -EFAULT;
2127        if (!copy_to_user(l, &flock, sizeof(flock)))
2128                error = 0;
2129rel_priv:
2130        locks_release_private(&file_lock);
2131out:
2132        return error;
2133}
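
    /*
     * Userspace sketch (illustrative): probe for a conflicting write lock on
     * the first 4096 bytes via the F_GETLK command implemented above:
     *
     *        struct flock fl = {
     *                .l_type = F_WRLCK, .l_whence = SEEK_SET,
     *                .l_start = 0, .l_len = 4096,
     *        };
     *        if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
     *                printf("conflict held by pid %d\n", (int)fl.l_pid);
     */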
2134
2135/**
2136 * vfs_lock_file - file byte range lock
2137 * @filp: The file to apply the lock to
2138 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2139 * @fl: The lock to be applied
2140 * @conf: Place to return a copy of the conflicting lock, if found.
2141 *
2142 * A caller that doesn't care about the conflicting lock may pass NULL
2143 * as the final argument.
2144 *
2145 * If the filesystem defines a private ->lock() method, then @conf will
2146 * be left unchanged; so a caller that cares should initialize it to
2147 * some acceptable default.
2148 *
2149 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2150 * locks, the ->lock() interface may return asynchronously, before the lock has
2151 * been granted or denied by the underlying filesystem, if (and only if)
2152 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2153 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2154 * the request is for a blocking lock. When ->lock() does return asynchronously,
2155 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2156 * request completes.
2157 * If the request is for a non-blocking lock the filesystem should return
2158 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2159 * with the result. If the request times out the callback routine will return
2160 * a nonzero return code and the filesystem should release the lock. The
2161 * filesystem is also responsible for keeping a corresponding posix lock when
2162 * it grants a lock, so that the VFS can find out which locks are locally held
2163 * and do the correct lock cleanup when required.
2164 * The underlying filesystem must not drop the kernel lock or call
2165 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2166 * return code.
2167 */
2168int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2169{
2170        if (filp->f_op->lock && is_remote_lock(filp))
2171                return filp->f_op->lock(filp, cmd, fl);
2172        else
2173                return posix_lock_file(filp, fl, conf);
2174}
2175EXPORT_SYMBOL_GPL(vfs_lock_file);
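
    /*
     * Sketch of the asynchronous ->lock() contract described above, for a
     * hypothetical filesystem (myfs_lock and myfs_queue_lock_request are
     * illustrative names): defer the request when the caller supplied
     * lm_grant, and complete it later from the filesystem's own context:
     *
     *        static int myfs_lock(struct file *filp, int cmd,
     *                             struct file_lock *fl)
     *        {
     *                if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
     *                        myfs_queue_lock_request(filp, fl);
     *                        return FILE_LOCK_DEFERRED;
     *                }
     *                return posix_lock_file(filp, fl, NULL);
     *        }
     *
     * The completion path would then call fl->fl_lmops->lm_grant(fl, result)
     * once the lock has been granted or denied.
     */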
2176
2177static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2178                             struct file_lock *fl)
2179{
2180        int error;
2181
2182        error = security_file_lock(filp, fl->fl_type);
2183        if (error)
2184                return error;
2185
2186        for (;;) {
2187                error = vfs_lock_file(filp, cmd, fl, NULL);
2188                if (error != FILE_LOCK_DEFERRED)
2189                        break;
2190                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2191                if (!error)
2192                        continue;
2193
2194                locks_delete_block(fl);
2195                break;
2196        }
2197
2198        return error;
2199}
2200
2201/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2202static int
2203check_fmode_for_setlk(struct file_lock *fl)
2204{
2205        switch (fl->fl_type) {
2206        case F_RDLCK:
2207                if (!(fl->fl_file->f_mode & FMODE_READ))
2208                        return -EBADF;
2209                break;
2210        case F_WRLCK:
2211                if (!(fl->fl_file->f_mode & FMODE_WRITE))
2212                        return -EBADF;
2213        }
2214        return 0;
2215}
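
    /*
     * For example, requesting F_WRLCK through a descriptor opened O_RDONLY
     * is rejected above with -EBADF before any lock state is touched.
     */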
2216
2217/* Apply the lock described by l to an open file descriptor.
2218 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2219 */
2220int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2221                struct flock __user *l)
2222{
2223        struct file_lock *file_lock = locks_alloc_lock();
2224        struct flock flock;
2225        struct inode *inode;
2226        struct file *f;
2227        int error;
2228
2229        if (file_lock == NULL)
2230                return -ENOLCK;
2231
2232        inode = locks_inode(filp);
2233
2234        /*
2235         * This might block, so we do it before checking the inode.
2236         */
2237        error = -EFAULT;
2238        if (copy_from_user(&flock, l, sizeof(flock)))
2239                goto out;
2240
2241        /* Don't allow mandatory locks on files that may be memory mapped
2242         * and shared.
2243         */
2244        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2245                error = -EAGAIN;
2246                goto out;
2247        }
2248
2249        error = flock_to_posix_lock(filp, file_lock, &flock);
2250        if (error)
2251                goto out;
2252
2253        error = check_fmode_for_setlk(file_lock);
2254        if (error)
2255                goto out;
2256
2257        /*
2258         * If the cmd is requesting open-file-description (OFD) locks,
2259         * then set the FL_OFDLCK flag and override the owner.
2260         */
2261        switch (cmd) {
2262        case F_OFD_SETLK:
2263                error = -EINVAL;
2264                if (flock.l_pid != 0)
2265                        goto out;
2266
2267                cmd = F_SETLK;
2268                file_lock->fl_flags |= FL_OFDLCK;
2269                file_lock->fl_owner = filp;
2270                break;
2271        case F_OFD_SETLKW:
2272                error = -EINVAL;
2273                if (flock.l_pid != 0)
2274                        goto out;
2275
2276                cmd = F_SETLKW;
2277                file_lock->fl_flags |= FL_OFDLCK;
2278                file_lock->fl_owner = filp;
2279                /* Fallthrough */
2280        case F_SETLKW:
2281                file_lock->fl_flags |= FL_SLEEP;
2282        }
2283
2284        error = do_lock_file_wait(filp, cmd, file_lock);
2285
2286        /*
2287         * Attempt to detect a close/fcntl race and recover by releasing the
2288         * lock that was just acquired. There is no need to do that when we're
2289         * unlocking though, or for OFD locks.
2290         */
2291        if (!error && file_lock->fl_type != F_UNLCK &&
2292            !(file_lock->fl_flags & FL_OFDLCK)) {
2293                /*
2294                 * We need the spin_lock here: it prevents reordering between
2295                 * the update of i_flctx->flc_posix and the check for it in
2296                 * close(). rcu_read_lock() wouldn't suffice.
2297                 */
2298                spin_lock(&current->files->file_lock);
2299                f = fcheck(fd);
2300                spin_unlock(&current->files->file_lock);
2301                if (f != filp) {
2302                        file_lock->fl_type = F_UNLCK;
2303                        error = do_lock_file_wait(filp, cmd, file_lock);
2304                        WARN_ON_ONCE(error);
2305                        error = -EBADF;
2306                }
2307        }
2308out:
2309        trace_fcntl_setlk(inode, file_lock, error);
2310        locks_free_lock(file_lock);
2311        return error;
2312}
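
    /*
     * Userspace sketch (illustrative): take an open-file-description lock
     * over the whole file via the F_OFD_SETLK path above; l_pid must be
     * zero for the F_OFD_* commands, and l_len == 0 means "to EOF":
     *
     *        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
     *        if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
     *                perror("F_OFD_SETLK");
     */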
2313
2314#if BITS_PER_LONG == 32
2315/* Report the first existing lock that would conflict with l.
2316 * This implements the F_GETLK command of fcntl().
2317 */
2318int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
2319{
2320        struct file_lock file_lock;
2321        struct flock64 flock;
2322        int error;
2323
2324        error = -EFAULT;
2325        if (copy_from_user(&flock, l, sizeof(flock)))
2326                goto out;
2327        error = -EINVAL;
2328        if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2329                goto out;
2330
2331        error = flock64_to_posix_lock(filp, &file_lock, &flock);
2332        if (error)
2333                goto out;
2334
2335        if (cmd == F_OFD_GETLK) {
2336                error = -EINVAL;
2337                if (flock.l_pid != 0)
2338                        goto out;
2339
2340                cmd = F_GETLK64;
2341                file_lock.fl_flags |= FL_OFDLCK;
2342                file_lock.fl_owner = filp;
2343        }
2344
2345        error = vfs_test_lock(filp, &file_lock);
2346        if (error)
2347                goto out;
2348
2349        flock.l_type = file_lock.fl_type;
2350        if (file_lock.fl_type != F_UNLCK)
2351                posix_lock_to_flock64(&flock, &file_lock);
2352
2353        error = -EFAULT;
2354        if (!copy_to_user(l, &flock, sizeof(flock)))
2355                error = 0;
2356
2357        locks_release_private(&file_lock);
2358out:
2359        return error;
2360}
2361
2362/* Apply the lock described by l to an open file descriptor.
2363 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2364 */
2365int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2366                struct flock64 __user *l)
2367{
2368        struct file_lock *file_lock = locks_alloc_lock();
2369        struct flock64 flock;
2370        struct inode *inode;
2371        struct file *f;
2372        int error;
2373
2374        if (file_lock == NULL)
2375                return -ENOLCK;
2376
2377        /*
2378         * This might block, so we do it before checking the inode.
2379         */
2380        error = -EFAULT;
2381        if (copy_from_user(&flock, l, sizeof(flock)))
2382                goto out;
2383
2384        inode = locks_inode(filp);
2385
2386        /* Don't allow mandatory locks on files that may be memory mapped
2387         * and shared.
2388         */
2389        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2390                error = -EAGAIN;
2391                goto out;
2392        }
2393
2394        error = flock64_to_posix_lock(filp, file_lock, &flock);
2395        if (error)
2396                goto out;
2397
2398        error = check_fmode_for_setlk(file_lock);
2399        if (error)
2400                goto out;
2401
2402        /*
2403         * If the cmd is requesting open-file-description (OFD) locks,
2404         * then set the FL_OFDLCK flag and override the owner.
2405         */
2406        switch (cmd) {
2407        case F_OFD_SETLK:
2408                error = -EINVAL;
2409                if (flock.l_pid != 0)
2410                        goto out;
2411
2412                cmd = F_SETLK64;
2413                file_lock->fl_flags |= FL_OFDLCK;
2414                file_lock->fl_owner = filp;
2415                break;
2416        case F_OFD_SETLKW:
2417                error = -EINVAL;
2418                if (flock.l_pid != 0)
2419                        goto out;
2420
2421                cmd = F_SETLKW64;
2422                file_lock->fl_flags |= FL_OFDLCK;
2423                file_lock->fl_owner = filp;
2424                /* Fallthrough */
2425        case F_SETLKW64:
2426                file_lock->fl_flags |= FL_SLEEP;
2427        }
2428
2429        error = do_lock_file_wait(filp, cmd, file_lock);
2430
2431        /*
2432         * Attempt to detect a close/fcntl race and recover by releasing the
2433         * lock that was just acquired. There is no need to do that when we're
2434         * unlocking though, or for OFD locks.
2435         */
2436        if (!error && file_lock->fl_type != F_UNLCK &&
2437            !(file_lock->fl_flags & FL_OFDLCK)) {
2438                /*
2439                 * We need the spin_lock here: it prevents reordering between
2440                 * the update of i_flctx->flc_posix and the check for it in
2441                 * close(). rcu_read_lock() wouldn't suffice.
2442                 */
2443                spin_lock(&current->files->file_lock);
2444                f = fcheck(fd);
2445                spin_unlock(&current->files->file_lock);
2446                if (f != filp) {
2447                        file_lock->fl_type = F_UNLCK;
2448                        error = do_lock_file_wait(filp, cmd, file_lock);
2449                        WARN_ON_ONCE(error);
2450                        error = -EBADF;
2451                }
2452        }
2453out:
2454        locks_free_lock(file_lock);
2455        return error;
2456}
2457#endif /* BITS_PER_LONG == 32 */
2458
2459/*
2460 * This function is called when the file is being removed
2461 * from the task's fd array.  POSIX locks belonging to this task
2462 * are deleted at this time.
2463 */
2464void locks_remove_posix(struct file *filp, fl_owner_t owner)
2465{
2466        int error;
2467        struct inode *inode = locks_inode(filp);
2468        struct file_lock lock;
2469        struct file_lock_context *ctx;
2470
2471        /*
2472         * If there are no locks held on this file, we don't need to call
2473         * posix_lock_file().  Another process could be setting a lock on this
2474         * file at the same time, but we wouldn't remove that lock anyway.
2475         */
2476        ctx = smp_load_acquire(&inode->i_flctx);
2477        if (!ctx || list_empty(&ctx->flc_posix))
2478                return;
2479
2480        lock.fl_type = F_UNLCK;
2481        lock.fl_flags = FL_POSIX | FL_CLOSE;
2482        lock.fl_start = 0;
2483        lock.fl_end = OFFSET_MAX;
2484        lock.fl_owner = owner;
2485        lock.fl_pid = current->tgid;
2486        lock.fl_file = filp;
2487        lock.fl_ops = NULL;
2488        lock.fl_lmops = NULL;
2489
2490        error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2491
2492        if (lock.fl_ops && lock.fl_ops->fl_release_private)
2493                lock.fl_ops->fl_release_private(&lock);
2494        trace_locks_remove_posix(inode, &lock, error);
2495}
2496
2497EXPORT_SYMBOL(locks_remove_posix);
2498
2499/* The i_flctx must be valid when calling into here */
2500static void
2501locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2502{
2503        struct file_lock fl = {
2504                .fl_owner = filp,
2505                .fl_pid = current->tgid,
2506                .fl_file = filp,
2507                .fl_flags = FL_FLOCK | FL_CLOSE,
2508                .fl_type = F_UNLCK,
2509                .fl_end = OFFSET_MAX,
2510        };
2511        struct inode *inode = locks_inode(filp);
2512
2513        if (list_empty(&flctx->flc_flock))
2514                return;
2515
2516        if (filp->f_op->flock && is_remote_lock(filp))
2517                filp->f_op->flock(filp, F_SETLKW, &fl);
2518        else
2519                flock_lock_inode(inode, &fl);
2520
2521        if (fl.fl_ops && fl.fl_ops->fl_release_private)
2522                fl.fl_ops->fl_release_private(&fl);
2523}
2524
2525/* The i_flctx must be valid when calling into here */
2526static void
2527locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2528{
2529        struct file_lock *fl, *tmp;
2530        LIST_HEAD(dispose);
2531
2532        if (list_empty(&ctx->flc_lease))
2533                return;
2534
2535        percpu_down_read_preempt_disable(&file_rwsem);
2536        spin_lock(&ctx->flc_lock);
2537        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2538                if (filp == fl->fl_file)
2539                        lease_modify(fl, F_UNLCK, &dispose);
2540        spin_unlock(&ctx->flc_lock);
2541        percpu_up_read_preempt_enable(&file_rwsem);
2542
2543        locks_dispose_list(&dispose);
2544}
2545
2546/*
2547 * This function is called on the last close of an open file.
2548 */
2549void locks_remove_file(struct file *filp)
2550{
2551        struct file_lock_context *ctx;
2552
2553        ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2554        if (!ctx)
2555                return;
2556
2557        /* remove any OFD locks */
2558        locks_remove_posix(filp, filp);
2559
2560        /* remove flock locks */
2561        locks_remove_flock(filp, ctx);
2562
2563        /* remove any leases */
2564        locks_remove_lease(filp, ctx);
2565}
2566
2567/**
2568 *      posix_unblock_lock - stop waiting for a file lock
2569 *      @waiter: the lock which was waiting
2570 *
2571 *      lockd needs to block waiting for locks.
2572 */
2573int
2574posix_unblock_lock(struct file_lock *waiter)
2575{
2576        int status = 0;
2577
2578        spin_lock(&blocked_lock_lock);
2579        if (waiter->fl_next)
2580                __locks_delete_block(waiter);
2581        else
2582                status = -ENOENT;
2583        spin_unlock(&blocked_lock_lock);
2584        return status;
2585}
2586EXPORT_SYMBOL(posix_unblock_lock);
2587
2588/**
2589 * vfs_cancel_lock - file byte range unblock lock
2590 * @filp: The file to apply the unblock to
2591 * @fl: The lock to be unblocked
2592 *
2593 * Used by lock managers to cancel blocked requests
2594 */
2595int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2596{
2597        if (filp->f_op->lock && is_remote_lock(filp))
2598                return filp->f_op->lock(filp, F_CANCELLK, fl);
2599        return 0;
2600}
2601
2602EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2603
2604#ifdef CONFIG_PROC_FS
2605#include <linux/proc_fs.h>
2606#include <linux/seq_file.h>
2607
2608struct locks_iterator {
2609        int     li_cpu;
2610        loff_t  li_pos;
2611};
2612
2613static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2614                            loff_t id, char *pfx)
2615{
2616        struct inode *inode = NULL;
2617        unsigned int fl_pid;
2618
2619        if (fl->fl_nspid) {
2620                struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2621
2622                /* Don't let fl_pid change based on who is reading the file */
2623                fl_pid = pid_nr_ns(fl->fl_nspid, proc_pidns);
2624
2625                /*
2626                 * If the pid translates to 0 in the reader's pid
2627                 * namespace, skip the lock entirely, whether we were
2628                 * called from locks_show() or from __show_fd_locks().
2629                 */
2630                if (fl_pid == 0)
2631                        return;
2632        } else
2633                fl_pid = fl->fl_pid;
2634
2635        if (fl->fl_file != NULL)
2636                inode = locks_inode(fl->fl_file);
2637
2638        seq_printf(f, "%lld:%s ", id, pfx);
2639        if (IS_POSIX(fl)) {
2640                if (fl->fl_flags & FL_ACCESS)
2641                        seq_puts(f, "ACCESS");
2642                else if (IS_OFDLCK(fl))
2643                        seq_puts(f, "OFDLCK");
2644                else
2645                        seq_puts(f, "POSIX ");
2646
2647                seq_printf(f, " %s ",
2648                             (inode == NULL) ? "*NOINODE*" :
2649                             mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2650        } else if (IS_FLOCK(fl)) {
2651                if (fl->fl_type & LOCK_MAND) {
2652                        seq_puts(f, "FLOCK  MSNFS     ");
2653                } else {
2654                        seq_puts(f, "FLOCK  ADVISORY  ");
2655                }
2656        } else if (IS_LEASE(fl)) {
2657                if (fl->fl_flags & FL_DELEG)
2658                        seq_puts(f, "DELEG  ");
2659                else
2660                        seq_puts(f, "LEASE  ");
2661
2662                if (lease_breaking(fl))
2663                        seq_puts(f, "BREAKING  ");
2664                else if (fl->fl_file)
2665                        seq_puts(f, "ACTIVE    ");
2666                else
2667                        seq_puts(f, "BREAKER   ");
2668        } else {
2669                seq_puts(f, "UNKNOWN UNKNOWN  ");
2670        }
2671        if (fl->fl_type & LOCK_MAND) {
2672                seq_printf(f, "%s ",
2673                               (fl->fl_type & LOCK_READ)
2674                               ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2675                               : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2676        } else {
2677                seq_printf(f, "%s ",
2678                               (lease_breaking(fl))
2679                               ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2680                               : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2681        }
2682        if (inode) {
2683                /* userspace relies on this representation of dev_t */
2684                seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2685                                MAJOR(inode->i_sb->s_dev),
2686                                MINOR(inode->i_sb->s_dev), inode->i_ino);
2687        } else {
2688                seq_printf(f, "%d <none>:0 ", fl_pid);
2689        }
2690        if (IS_POSIX(fl)) {
2691                if (fl->fl_end == OFFSET_MAX)
2692                        seq_printf(f, "%Ld EOF\n", fl->fl_start);
2693                else
2694                        seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2695        } else {
2696                seq_puts(f, "0 EOF\n");
2697        }
2698}
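
    /*
     * A typical /proc/locks line emitted above looks like (illustrative
     * values):
     *
     *        1: POSIX  ADVISORY  WRITE 1234 08:01:5398 0 EOF
     *
     * i.e. id, lock class, mandatory/advisory, access, pid,
     * major:minor:inode, start and end.
     */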
2699
2700static int locks_show(struct seq_file *f, void *v)
2701{
2702        struct locks_iterator *iter = f->private;
2703        struct file_lock *fl, *bfl;
2704        struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2705
2706        fl = hlist_entry(v, struct file_lock, fl_link);
2707
2708        if (fl->fl_nspid && !pid_nr_ns(fl->fl_nspid, proc_pidns))
2709                return 0;
2710
2711        lock_get_status(f, fl, iter->li_pos, "");
2712
2713        list_for_each_entry(bfl, &fl->fl_block, fl_block)
2714                lock_get_status(f, bfl, iter->li_pos, " ->");
2715
2716        return 0;
2717}
2718
2719static void __show_fd_locks(struct seq_file *f,
2720                        struct list_head *head, int *id,
2721                        struct file *filp, struct files_struct *files)
2722{
2723        struct file_lock *fl;
2724
2725        list_for_each_entry(fl, head, fl_list) {
2726
2727                if (filp != fl->fl_file)
2728                        continue;
2729                if (fl->fl_owner != files &&
2730                    fl->fl_owner != filp)
2731                        continue;
2732
2733                (*id)++;
2734                seq_puts(f, "lock:\t");
2735                lock_get_status(f, fl, *id, "");
2736        }
2737}
2738
2739void show_fd_locks(struct seq_file *f,
2740                  struct file *filp, struct files_struct *files)
2741{
2742        struct inode *inode = locks_inode(filp);
2743        struct file_lock_context *ctx;
2744        int id = 0;
2745
2746        ctx = smp_load_acquire(&inode->i_flctx);
2747        if (!ctx)
2748                return;
2749
2750        spin_lock(&ctx->flc_lock);
2751        __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2752        __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2753        __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2754        spin_unlock(&ctx->flc_lock);
2755}
2756
2757static void *locks_start(struct seq_file *f, loff_t *pos)
2758        __acquires(&blocked_lock_lock)
2759{
2760        struct locks_iterator *iter = f->private;
2761
2762        iter->li_pos = *pos + 1;
2763        percpu_down_write(&file_rwsem);
2764        spin_lock(&blocked_lock_lock);
2765        return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2766}
2767
2768static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2769{
2770        struct locks_iterator *iter = f->private;
2771
2772        ++iter->li_pos;
2773        return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2774}
2775
2776static void locks_stop(struct seq_file *f, void *v)
2777        __releases(&blocked_lock_lock)
2778{
2779        spin_unlock(&blocked_lock_lock);
2780        percpu_up_write(&file_rwsem);
2781}
2782
2783static const struct seq_operations locks_seq_operations = {
2784        .start  = locks_start,
2785        .next   = locks_next,
2786        .stop   = locks_stop,
2787        .show   = locks_show,
2788};
2789
2790static int locks_open(struct inode *inode, struct file *filp)
2791{
2792        return seq_open_private(filp, &locks_seq_operations,
2793                                        sizeof(struct locks_iterator));
2794}
2795
2796static const struct file_operations proc_locks_operations = {
2797        .open           = locks_open,
2798        .read           = seq_read,
2799        .llseek         = seq_lseek,
2800        .release        = seq_release_private,
2801};
2802
2803static int __init proc_locks_init(void)
2804{
2805        proc_create("locks", 0, NULL, &proc_locks_operations);
2806        return 0;
2807}
2808fs_initcall(proc_locks_init);
2809#endif
2810
2811static int __init filelock_init(void)
2812{
2813        int i;
2814
2815        flctx_cache = kmem_cache_create("file_lock_ctx",
2816                        sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2817
2818        filelock_cache = kmem_cache_create("file_lock_cache",
2819                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2820
2822        for_each_possible_cpu(i) {
2823                struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2824
2825                spin_lock_init(&fll->lock);
2826                INIT_HLIST_HEAD(&fll->hlist);
2827        }
2828
2829        return 0;
2830}
2831
2832core_initcall(filelock_init);
2833