linux/fs/locks.c
   1/*
   2 *  linux/fs/locks.c
   3 *
   4 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5 *  Doug Evans (dje@spiff.uucp), August 07, 1992
   6 *
   7 *  Deadlock detection added.
   8 *  FIXME: one thing isn't handled yet:
   9 *      - mandatory locks (requires lots of changes elsewhere)
  10 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
  11 *
  12 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
  13 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
  14 *  
  15 *  Converted file_lock_table to a linked list from an array, which eliminates
  16 *  the limits on how many active file locks are open.
  17 *  Chad Page (pageone@netcom.com), November 27, 1994
  18 * 
  19 *  Removed dependency on file descriptors. dup()'ed file descriptors now
  20 *  get the same locks as the original file descriptors, and a close() on
  21 *  any file descriptor removes ALL the locks on the file for the current
  22 *  process. Since locks still depend on the process id, locks are inherited
  23 *  after an exec() but not after a fork(). This agrees with POSIX, and both
  24 *  BSD and SVR4 practice.
  25 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
  26 *
  27 *  Scrapped free list which is redundant now that we allocate locks
  28 *  dynamically with kmalloc()/kfree().
  29 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  30 *
  31 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
  32 *
  33 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
  34 *  fcntl() system call. They have the semantics described above.
  35 *
  36 *  FL_FLOCK locks are created with calls to flock(), through the flock()
  37 *  system call, which is new. Old C libraries implement flock() via fcntl()
  38 *  and will continue to use the old, broken implementation.
  39 *
  40 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
  41 *  with a file pointer (filp). As a result they can be shared by a parent
  42 *  process and its children after a fork(). They are removed when the last
  43 *  file descriptor referring to the file pointer is closed (unless explicitly
  44 *  unlocked). 
  45 *
  46 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
  47 *  upgrading from shared to exclusive (or vice versa). When this happens
  48 *  any processes blocked by the current lock are woken up and allowed to
  49 *  run before the new lock is applied.
  50 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
  51 *
  52 *  Removed some race conditions in flock_lock_file(), marked other possible
  53 *  races. Just grep for FIXME to see them. 
  54 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
  55 *
  56 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
  57 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
  58 *  once we've checked for blocking and deadlocking.
  59 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
  60 *
  61 *  Initial implementation of mandatory locks. SunOS turned out to be
  62 *  a rotten model, so I implemented the "obvious" semantics.
  63 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
  64 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
  65 *
  66 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
  67 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
  68 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
  69 *  Manual, Section 2.
  70 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
  71 *
  72 *  Tidied up block list handling. Added '/proc/locks' interface.
  73 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
  74 *
  75 *  Fixed deadlock condition for pathological code that mixes calls to
  76 *  flock() and fcntl().
  77 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
  78 *
  79 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
  80 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
  81 *  guarantee sensible behaviour in the case where file system modules might
  82 *  be compiled with different options than the kernel itself.
  83 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  84 *
  85 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
  86 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
  87 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  88 *
  89 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
  90 *  locks. Changed process synchronisation to avoid dereferencing locks that
  91 *  have already been freed.
  92 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
  93 *
  94 *  Made the block list a circular list to minimise searching in the list.
  95 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
  96 *
  97 *  Made mandatory locking a mount option. Default is not to allow mandatory
  98 *  locking.
  99 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 100 *
 101 *  Some adaptations for NFS support.
 102 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 103 *
 104 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 105 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 106 *
 107 *  Use slab allocator instead of kmalloc/kfree.
 108 *  Use generic list implementation from <linux/list.h>.
 109 *  Sped up posix_locks_deadlock by only considering blocked locks.
 110 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 111 *
 112 *  Leases and LOCK_MAND
 113 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 114 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 115 */
 116
 117#include <linux/capability.h>
 118#include <linux/file.h>
 119#include <linux/fdtable.h>
 120#include <linux/fs.h>
 121#include <linux/init.h>
 122#include <linux/module.h>
 123#include <linux/security.h>
 124#include <linux/slab.h>
 125#include <linux/syscalls.h>
 126#include <linux/time.h>
 127#include <linux/rcupdate.h>
 128#include <linux/pid_namespace.h>
 129#include <linux/hashtable.h>
 130#include <linux/percpu.h>
 131#include <linux/lglock.h>
 132
 133#define CREATE_TRACE_POINTS
 134#include <trace/events/filelock.h>
 135
 136#include <asm/uaccess.h>
 137
 138#define IS_POSIX(fl)    (fl->fl_flags & FL_POSIX)
 139#define IS_FLOCK(fl)    (fl->fl_flags & FL_FLOCK)
 140#define IS_LEASE(fl)    (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
 141#define IS_OFDLCK(fl)   (fl->fl_flags & FL_OFDLCK)
 142#define IS_REMOTELCK(fl)        (fl->fl_pid <= 0)
 143
 144static inline bool is_remote_lock(struct file *filp)
 145{
 146        return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
 147}
 148
 149static bool lease_breaking(struct file_lock *fl)
 150{
 151        return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
 152}
 153
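/*
 * The lease type this lease will have once any pending break completes:
 * F_UNLCK if an unlock is pending, F_RDLCK if a downgrade is pending,
 * otherwise the type currently held.
 */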
 154static int target_leasetype(struct file_lock *fl)
 155{
 156        if (fl->fl_flags & FL_UNLOCK_PENDING)
 157                return F_UNLCK;
 158        if (fl->fl_flags & FL_DOWNGRADE_PENDING)
 159                return F_RDLCK;
 160        return fl->fl_type;
 161}
 162
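/*
 * Lease tunables, exposed as the fs.leases-enable and fs.lease-break-time
 * sysctls. lease_break_time is the grace period (in seconds) given to a
 * lease holder before a conflicting open forces the lease to be broken.
 */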
 163int leases_enable = 1;
 164int lease_break_time = 45;
 165
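/*
 * Walk every lock on an inode through the address of each fl_next pointer,
 * so that the current entry can be unlinked or replaced while iterating.
 * Callers are expected to hold the inode's i_lock.
 */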
 166#define for_each_lock(inode, lockp) \
 167        for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
 168
 169/*
 170 * The global file_lock_list is only used for displaying /proc/locks, so we
 171 * keep a list on each CPU, with each list protected by its own spinlock via
 172 * the file_lock_lglock. Note that alterations to the list also require that
 173 * the relevant i_lock is held.
 174 */
 175DEFINE_STATIC_LGLOCK(file_lock_lglock);
 176static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 177
 178/*
 179 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 180 * It is protected by blocked_lock_lock.
 181 *
 182 * We hash locks by lockowner in order to optimize searching for the lock a
 183 * particular lockowner is waiting on.
 184 *
 185 * FIXME: make this value scale via some heuristic? We generally will want more
 186 * buckets when we have more lockowners holding locks, but that's a little
 187 * difficult to determine without knowing what the workload will look like.
 188 */
 189#define BLOCKED_HASH_BITS       7
 190static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 191
 192/*
 193 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 194 * want to be holding this lock.
 195 *
 196 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 197 * pointer for file_lock structures that are acting as lock requests (in
 198 * contrast to those that are acting as records of acquired locks).
 199 *
 200 * Note that when we acquire this lock in order to change the above fields,
 201 * we often hold the i_lock as well. In certain cases, when reading the fields
 202 * protected by this lock, we can skip acquiring it iff we already hold the
 203 * i_lock.
 204 *
 205 * In particular, adding an entry to the fl_block list requires that you hold
 206 * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
  207 * an entry from the list however only requires the blocked_lock_lock.
 208 */
 209static DEFINE_SPINLOCK(blocked_lock_lock);
 210
 211static struct kmem_cache *filelock_cache __read_mostly;
 212
 213static void locks_init_lock_heads(struct file_lock *fl)
 214{
 215        INIT_HLIST_NODE(&fl->fl_link);
 216        INIT_LIST_HEAD(&fl->fl_block);
 217        init_waitqueue_head(&fl->fl_wait);
 218}
 219
 220/* Allocate an empty lock structure. */
 221struct file_lock *locks_alloc_lock(void)
 222{
 223        struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 224
 225        if (fl)
 226                locks_init_lock_heads(fl);
 227
 228        return fl;
 229}
 230EXPORT_SYMBOL_GPL(locks_alloc_lock);
 231
 232void locks_release_private(struct file_lock *fl)
 233{
 234        struct lock_manager_operations_extend *lm_ops_extend;
 235
 236        if (fl->fl_ops) {
 237                if (fl->fl_ops->fl_release_private)
 238                        fl->fl_ops->fl_release_private(fl);
 239                fl->fl_ops = NULL;
 240        }
 241
 242        if (fl->fl_lmops) {
 243                lm_ops_extend = get_lm_ops_extend(fl);
 244                if (lm_ops_extend && lm_ops_extend->lm_put_owner)
 245                        lm_ops_extend->lm_put_owner(fl);
 246                fl->fl_lmops = NULL;
 247        }
 248}
 249EXPORT_SYMBOL_GPL(locks_release_private);
 250
 251/* Free a lock which is not in use. */
 252void locks_free_lock(struct file_lock *fl)
 253{
 254        BUG_ON(waitqueue_active(&fl->fl_wait));
 255        BUG_ON(!list_empty(&fl->fl_block));
 256        BUG_ON(!hlist_unhashed(&fl->fl_link));
 257
 258        locks_release_private(fl);
 259        kmem_cache_free(filelock_cache, fl);
 260}
 261EXPORT_SYMBOL(locks_free_lock);
 262
 263static void
 264locks_dispose_list(struct list_head *dispose)
 265{
 266        struct file_lock *fl;
 267
 268        while (!list_empty(dispose)) {
 269                fl = list_first_entry(dispose, struct file_lock, fl_block);
 270                list_del_init(&fl->fl_block);
 271                locks_free_lock(fl);
 272        }
 273}
 274
 275void locks_init_lock(struct file_lock *fl)
 276{
 277        memset(fl, 0, sizeof(struct file_lock));
 278        locks_init_lock_heads(fl);
 279}
 280
 281EXPORT_SYMBOL(locks_init_lock);
 282
 283/*
 284 * Initialize a new lock from an existing file_lock structure.
 285 */
 286void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
 287{
 288        struct lock_manager_operations_extend *lm_ops_extend;
 289
 290        new->fl_owner = fl->fl_owner;
 291        new->fl_pid = fl->fl_pid;
 292        new->fl_file = NULL;
 293        new->fl_flags = fl->fl_flags;
 294        new->fl_type = fl->fl_type;
 295        new->fl_start = fl->fl_start;
 296        new->fl_end = fl->fl_end;
 297        new->fl_lmops = fl->fl_lmops;
 298        new->fl_ops = NULL;
 299
 300        if (fl->fl_lmops) {
 301                lm_ops_extend = get_lm_ops_extend(fl);
 302                if (lm_ops_extend && lm_ops_extend->lm_get_owner)
 303                        lm_ops_extend->lm_get_owner(new, fl);
 304        }
 305}
 306EXPORT_SYMBOL(locks_copy_conflock);
 307
 308void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
 309{
 310        new->fl_owner = fl->fl_owner;
 311        new->fl_pid = fl->fl_pid;
 312        new->fl_file = NULL;
 313        new->fl_flags = fl->fl_flags;
 314        new->fl_type = fl->fl_type;
 315        new->fl_start = fl->fl_start;
 316        new->fl_end = fl->fl_end;
 317        new->fl_ops = NULL;
 318        new->fl_lmops = NULL;
 319}
 320EXPORT_SYMBOL(__locks_copy_lock);
 321
 322void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 323{
 324        locks_release_private(new);
 325
 326        locks_copy_conflock(new, fl);
 327
 328        new->fl_file = fl->fl_file;
 329        new->fl_ops = fl->fl_ops;
 330
 331        if (fl->fl_ops) {
 332                if (fl->fl_ops->fl_copy_lock)
 333                        fl->fl_ops->fl_copy_lock(new, fl);
 334        }
 335}
 336
 337EXPORT_SYMBOL(locks_copy_lock);
 338
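/*
 * Translate a flock() operation into an internal lock type: LOCK_SH maps to
 * F_RDLCK, LOCK_EX to F_WRLCK and LOCK_UN to F_UNLCK. LOCK_MAND requests are
 * passed through with their LOCK_RW bits; anything else is -EINVAL.
 */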
 339static inline int flock_translate_cmd(int cmd) {
 340        if (cmd & LOCK_MAND)
 341                return cmd & (LOCK_MAND | LOCK_RW);
 342        switch (cmd) {
 343        case LOCK_SH:
 344                return F_RDLCK;
 345        case LOCK_EX:
 346                return F_WRLCK;
 347        case LOCK_UN:
 348                return F_UNLCK;
 349        }
 350        return -EINVAL;
 351}
 352
 353/* Fill in a file_lock structure with an appropriate FLOCK lock. */
 354static int flock_make_lock(struct file *filp, struct file_lock **lock,
 355                unsigned int cmd)
 356{
 357        struct file_lock *fl;
 358        int type = flock_translate_cmd(cmd);
 359        if (type < 0)
 360                return type;
 361        
 362        fl = locks_alloc_lock();
 363        if (fl == NULL)
 364                return -ENOMEM;
 365
 366        fl->fl_file = filp;
 367        fl->fl_pid = current->tgid;
 368        fl->fl_flags = FL_FLOCK;
 369        fl->fl_type = type;
 370        fl->fl_end = OFFSET_MAX;
 371        
 372        *lock = fl;
 373        return 0;
 374}
 375
 376static int assign_type(struct file_lock *fl, long type)
 377{
 378        switch (type) {
 379        case F_RDLCK:
 380        case F_WRLCK:
 381        case F_UNLCK:
 382                fl->fl_type = type;
 383                break;
 384        default:
 385                return -EINVAL;
 386        }
 387        return 0;
 388}
 389
 390static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 391                                 struct flock64 *l)
 392{
 393        switch (l->l_whence) {
 394        case SEEK_SET:
 395                fl->fl_start = 0;
 396                break;
 397        case SEEK_CUR:
 398                fl->fl_start = filp->f_pos;
 399                break;
 400        case SEEK_END:
 401                fl->fl_start = i_size_read(file_inode(filp));
 402                break;
 403        default:
 404                return -EINVAL;
 405        }
 406        if (l->l_start > OFFSET_MAX - fl->fl_start)
 407                return -EOVERFLOW;
 408        fl->fl_start += l->l_start;
 409        if (fl->fl_start < 0)
 410                return -EINVAL;
 411
 412        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
 413           POSIX-2001 defines it. */
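        /*
         * For example (illustration only): l_whence = SEEK_SET,
         * l_start = 100, l_len = -10 yields fl_start = 90 and fl_end = 99,
         * i.e. the ten bytes just below offset 100.
         */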
 414        if (l->l_len > 0) {
 415                if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
 416                        return -EOVERFLOW;
 417                fl->fl_end = fl->fl_start + l->l_len - 1;
 418
 419        } else if (l->l_len < 0) {
 420                if (fl->fl_start + l->l_len < 0)
 421                        return -EINVAL;
 422                fl->fl_end = fl->fl_start - 1;
 423                fl->fl_start += l->l_len;
 424        } else
 425                fl->fl_end = OFFSET_MAX;
 426
 427        fl->fl_owner = current->files;
 428        fl->fl_pid = current->tgid;
 429        fl->fl_file = filp;
 430        fl->fl_flags = FL_POSIX;
 431        fl->fl_ops = NULL;
 432        fl->fl_lmops = NULL;
 433
 434        return assign_type(fl, l->l_type);
 435}
 436
 437/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 438 * style lock.
 439 */
 440static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 441                               struct flock *l)
 442{
 443        struct flock64 ll = {
 444                .l_type = l->l_type,
 445                .l_whence = l->l_whence,
 446                .l_start = l->l_start,
 447                .l_len = l->l_len,
 448        };
 449
 450        return flock64_to_posix_lock(filp, fl, &ll);
 451}
 452
 453/* default lease lock manager operations */
 454static void lease_break_callback(struct file_lock *fl)
 455{
 456        kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 457}
 458
 459static const struct lock_manager_operations lease_manager_ops = {
 460        .lm_break = lease_break_callback,
 461        .lm_change = lease_modify,
 462};
 463
 464/*
 465 * Initialize a lease, use the default lock manager operations
 466 */
 467static int lease_init(struct file *filp, long type, struct file_lock *fl)
  468{
 469        if (assign_type(fl, type) != 0)
 470                return -EINVAL;
 471
 472        fl->fl_owner = (fl_owner_t)filp;
 473        fl->fl_pid = current->tgid;
 474
 475        fl->fl_file = filp;
 476        fl->fl_flags = FL_LEASE;
 477        fl->fl_start = 0;
 478        fl->fl_end = OFFSET_MAX;
 479        fl->fl_ops = NULL;
 480        fl->fl_lmops = &lease_manager_ops;
 481        return 0;
 482}
 483
 484/* Allocate a file_lock initialised to this type of lease */
 485static struct file_lock *lease_alloc(struct file *filp, long type)
 486{
 487        struct file_lock *fl = locks_alloc_lock();
 488        int error = -ENOMEM;
 489
 490        if (fl == NULL)
 491                return ERR_PTR(error);
 492
 493        error = lease_init(filp, type, fl);
 494        if (error) {
 495                locks_free_lock(fl);
 496                return ERR_PTR(error);
 497        }
 498        return fl;
 499}
 500
 501/* Check if two locks overlap each other.
 502 */
 503static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
 504{
 505        return ((fl1->fl_end >= fl2->fl_start) &&
 506                (fl2->fl_end >= fl1->fl_start));
 507}
 508
 509/*
 510 * Check whether two locks have the same owner.
 511 */
 512static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 513{
 514        if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
 515                return fl2->fl_lmops == fl1->fl_lmops &&
 516                        fl1->fl_lmops->lm_compare_owner(fl1, fl2);
 517        return fl1->fl_owner == fl2->fl_owner;
 518}
 519
 520/* Must be called with the i_lock held! */
 521static void locks_insert_global_locks(struct file_lock *fl)
 522{
 523        lg_local_lock(&file_lock_lglock);
 524        fl->fl_link_cpu = smp_processor_id();
 525        hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
 526        lg_local_unlock(&file_lock_lglock);
 527}
 528
 529/* Must be called with the i_lock held! */
 530static void locks_delete_global_locks(struct file_lock *fl)
 531{
 532        /*
 533         * Avoid taking lock if already unhashed. This is safe since this check
 534         * is done while holding the i_lock, and new insertions into the list
 535         * also require that it be held.
 536         */
 537        if (hlist_unhashed(&fl->fl_link))
 538                return;
 539        lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 540        hlist_del_init(&fl->fl_link);
 541        lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 542}
 543
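/*
 * Key used to hash a blocked waiter into the blocked_hash: lock managers may
 * provide their own lm_owner_key operation, otherwise the fl_owner pointer
 * itself is used.
 */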
 544static unsigned long
 545posix_owner_key(struct file_lock *fl)
 546{
 547        if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
 548                return fl->fl_lmops->lm_owner_key(fl);
 549        return (unsigned long)fl->fl_owner;
 550}
 551
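/* Both of these are called with the blocked_lock_lock already held. */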
 552static void locks_insert_global_blocked(struct file_lock *waiter)
 553{
 554        hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
 555}
 556
 557static void locks_delete_global_blocked(struct file_lock *waiter)
 558{
 559        hash_del(&waiter->fl_link);
 560}
 561
 562/* Remove waiter from blocker's block list.
 563 * When blocker ends up pointing to itself then the list is empty.
 564 *
 565 * Must be called with blocked_lock_lock held.
 566 */
 567static void __locks_delete_block(struct file_lock *waiter)
 568{
 569        locks_delete_global_blocked(waiter);
 570        list_del_init(&waiter->fl_block);
 571        waiter->fl_next = NULL;
 572}
 573
 574static void locks_delete_block(struct file_lock *waiter)
 575{
 576        spin_lock(&blocked_lock_lock);
 577        __locks_delete_block(waiter);
 578        spin_unlock(&blocked_lock_lock);
 579}
 580
 581/* Insert waiter into blocker's block list.
 582 * We use a circular list so that processes can be easily woken up in
 583 * the order they blocked. The documentation doesn't require this but
 584 * it seems like the reasonable thing to do.
 585 *
 586 * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
  587 * list itself is protected by the blocked_lock_lock, but by ensuring that the
 588 * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
 589 * in some cases when we see that the fl_block list is empty.
 590 */
 591static void __locks_insert_block(struct file_lock *blocker,
 592                                        struct file_lock *waiter)
 593{
 594        BUG_ON(!list_empty(&waiter->fl_block));
 595        waiter->fl_next = blocker;
 596        list_add_tail(&waiter->fl_block, &blocker->fl_block);
 597        if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
 598                locks_insert_global_blocked(waiter);
 599}
 600
 601/* Must be called with i_lock held. */
 602static void locks_insert_block(struct file_lock *blocker,
 603                                        struct file_lock *waiter)
 604{
 605        spin_lock(&blocked_lock_lock);
 606        __locks_insert_block(blocker, waiter);
 607        spin_unlock(&blocked_lock_lock);
 608}
 609
 610/*
 611 * Wake up processes blocked waiting for blocker.
 612 *
 613 * Must be called with the inode->i_lock held!
 614 */
 615static void locks_wake_up_blocks(struct file_lock *blocker)
 616{
 617        /*
 618         * Avoid taking global lock if list is empty. This is safe since new
 619         * blocked requests are only added to the list under the i_lock, and
 620         * the i_lock is always held here. Note that removal from the fl_block
 621         * list does not require the i_lock, so we must recheck list_empty()
 622         * after acquiring the blocked_lock_lock.
 623         */
 624        if (list_empty(&blocker->fl_block))
 625                return;
 626
 627        spin_lock(&blocked_lock_lock);
 628        while (!list_empty(&blocker->fl_block)) {
 629                struct file_lock *waiter;
 630
 631                waiter = list_first_entry(&blocker->fl_block,
 632                                struct file_lock, fl_block);
 633                __locks_delete_block(waiter);
 634                if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 635                        waiter->fl_lmops->lm_notify(waiter);
 636                else
 637                        wake_up(&waiter->fl_wait);
 638        }
 639        spin_unlock(&blocked_lock_lock);
 640}
 641
 642/* Insert file lock fl into an inode's lock list at the position indicated
 643 * by pos. At the same time add the lock to the global file lock list.
 644 *
 645 * Must be called with the i_lock held!
 646 */
 647static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 648{
 649        /* insert into file's list */
 650        fl->fl_next = *pos;
 651        *pos = fl;
 652
 653        locks_insert_global_locks(fl);
 654}
 655
 656/**
  657 * locks_unlink_lock - Unlink a lock from the inode and global lock lists.
  658 * @thisfl_p: pointer to the fl_next field of the previous
 659 *            inode->i_flock list entry
 660 *
  661 * Unlink a lock from all lists, but don't free it yet. Wake up any
  662 * processes that are blocked waiting for this lock and notify the FS
  663 * that the lock has been cleared.
 664 *
 665 * Must be called with the i_lock held!
 666 */
 667static void locks_unlink_lock(struct file_lock **thisfl_p)
 668{
 669        struct file_lock *fl = *thisfl_p;
 670
 671        locks_delete_global_locks(fl);
 672
 673        *thisfl_p = fl->fl_next;
 674        fl->fl_next = NULL;
 675
 676        locks_wake_up_blocks(fl);
 677}
 678
 679/*
 680 * Unlink a lock from all lists and free it.
 681 *
 682 * Must be called with i_lock held!
 683 */
 684static void locks_delete_lock(struct file_lock **thisfl_p,
 685                              struct list_head *dispose)
 686{
 687        struct file_lock *fl = *thisfl_p;
 688
 689        locks_unlink_lock(thisfl_p);
 690        if (dispose)
 691                list_add(&fl->fl_block, dispose);
 692        else
 693                locks_free_lock(fl);
 694}
 695
 696/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 697 * checks for shared/exclusive status of overlapping locks.
 698 */
 699static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 700{
 701        if (sys_fl->fl_type == F_WRLCK)
 702                return 1;
 703        if (caller_fl->fl_type == F_WRLCK)
 704                return 1;
 705        return 0;
 706}
 707
 708/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 709 * checking before calling the locks_conflict().
 710 */
 711static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 712{
 713        /* POSIX locks owned by the same process do not conflict with
 714         * each other.
 715         */
 716        if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
 717                return (0);
 718
 719        /* Check whether they overlap */
 720        if (!locks_overlap(caller_fl, sys_fl))
 721                return 0;
 722
 723        return (locks_conflict(caller_fl, sys_fl));
 724}
 725
 726/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 727 * checking before calling the locks_conflict().
 728 */
 729static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 730{
 731        /* FLOCK locks referring to the same filp do not conflict with
 732         * each other.
 733         */
 734        if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
 735                return (0);
 736        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
 737                return 0;
 738
 739        return (locks_conflict(caller_fl, sys_fl));
 740}
 741
 742void
 743posix_test_lock(struct file *filp, struct file_lock *fl)
 744{
 745        struct file_lock *cfl;
 746        struct inode *inode = locks_inode(filp);
 747
 748        spin_lock(&inode->i_lock);
 749        for (cfl = inode->i_flock; cfl; cfl = cfl->fl_next) {
 750                if (!IS_POSIX(cfl))
 751                        continue;
 752                if (posix_locks_conflict(fl, cfl))
 753                        break;
 754        }
 755        if (cfl)
 756                locks_copy_conflock(fl, cfl);
 757        else
 758                fl->fl_type = F_UNLCK;
 759        spin_unlock(&inode->i_lock);
 760        return;
 761}
 762EXPORT_SYMBOL(posix_test_lock);
 763
 764/*
 765 * Deadlock detection:
 766 *
 767 * We attempt to detect deadlocks that are due purely to posix file
 768 * locks.
 769 *
 770 * We assume that a task can be waiting for at most one lock at a time.
 771 * So for any acquired lock, the process holding that lock may be
 772 * waiting on at most one other lock.  That lock in turns may be held by
 773 * someone waiting for at most one other lock.  Given a requested lock
 774 * caller_fl which is about to wait for a conflicting lock block_fl, we
 775 * follow this chain of waiters to ensure we are not about to create a
 776 * cycle.
 777 *
 778 * Since we do this before we ever put a process to sleep on a lock, we
 779 * are ensured that there is never a cycle; that is what guarantees that
 780 * the while() loop in posix_locks_deadlock() eventually completes.
 781 *
 782 * Note: the above assumption may not be true when handling lock
 783 * requests from a broken NFS client. It may also fail in the presence
 784 * of tasks (such as posix threads) sharing the same open file table.
 785 * To handle those cases, we just bail out after a few iterations.
 786 *
 787 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 788 * Because the owner is not even nominally tied to a thread of
 789 * execution, the deadlock detection below can't reasonably work well. Just
 790 * skip it for those.
 791 *
 792 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 793 * locks that just checks for the case where two tasks are attempting to
 794 * upgrade from read to write locks on the same inode.
 795 */
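
/*
 * A minimal illustration (hypothetical owners A and B): A holds lock L1 and
 * is blocked waiting for lock L2, which B holds. If B now requests a lock
 * that conflicts with L1, walking the chain of waiters starting from L1
 * leads to a lock that B itself holds, so the request fails with -EDEADLK
 * instead of blocking.
 */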
 796
 797#define MAX_DEADLK_ITERATIONS 10
 798
 799/* Find a lock that the owner of the given block_fl is blocking on. */
 800static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
 801{
 802        struct file_lock *fl;
 803
 804        hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
 805                if (posix_same_owner(fl, block_fl))
 806                        return fl->fl_next;
 807        }
 808        return NULL;
 809}
 810
 811/* Must be called with the blocked_lock_lock held! */
 812static int posix_locks_deadlock(struct file_lock *caller_fl,
 813                                struct file_lock *block_fl)
 814{
 815        int i = 0;
 816
 817        /*
 818         * This deadlock detector can't reasonably detect deadlocks with
 819         * FL_OFDLCK locks, since they aren't owned by a process, per-se.
 820         */
 821        if (IS_OFDLCK(caller_fl))
 822                return 0;
 823
 824        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
 825                if (i++ > MAX_DEADLK_ITERATIONS)
 826                        return 0;
 827                if (posix_same_owner(caller_fl, block_fl))
 828                        return 1;
 829        }
 830        return 0;
 831}
 832
 833/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 834 * after any leases, but before any posix locks.
 835 *
 836 * Note that if called with an FL_EXISTS argument, the caller may determine
 837 * whether or not a lock was successfully freed by testing the return
 838 * value for -ENOENT.
 839 */
 840static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 841{
 842        struct file_lock *new_fl = NULL;
 843        struct file_lock **before;
 844        int error = 0;
 845        int found = 0;
 846        LIST_HEAD(dispose);
 847
 848        if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
 849                new_fl = locks_alloc_lock();
 850                if (!new_fl)
 851                        return -ENOMEM;
 852        }
 853
 854        spin_lock(&inode->i_lock);
 855        if (request->fl_flags & FL_ACCESS)
 856                goto find_conflict;
 857
 858        for_each_lock(inode, before) {
 859                struct file_lock *fl = *before;
 860                if (IS_POSIX(fl))
 861                        break;
 862                if (IS_LEASE(fl))
 863                        continue;
 864                if (request->fl_file != fl->fl_file)
 865                        continue;
 866                if (request->fl_type == fl->fl_type)
 867                        goto out;
 868                found = 1;
 869                locks_delete_lock(before, &dispose);
 870                break;
 871        }
 872
 873        if (request->fl_type == F_UNLCK) {
 874                if ((request->fl_flags & FL_EXISTS) && !found)
 875                        error = -ENOENT;
 876                goto out;
 877        }
 878
 879find_conflict:
 880        for_each_lock(inode, before) {
 881                struct file_lock *fl = *before;
 882                if (IS_POSIX(fl))
 883                        break;
 884                if (IS_LEASE(fl))
 885                        continue;
 886                if (!flock_locks_conflict(request, fl))
 887                        continue;
 888                error = -EAGAIN;
 889                if (!(request->fl_flags & FL_SLEEP))
 890                        goto out;
 891                error = FILE_LOCK_DEFERRED;
 892                locks_insert_block(fl, request);
 893                goto out;
 894        }
 895        if (request->fl_flags & FL_ACCESS)
 896                goto out;
 897        locks_copy_lock(new_fl, request);
 898        locks_insert_lock(before, new_fl);
 899        new_fl = NULL;
 900        error = 0;
 901
 902out:
 903        spin_unlock(&inode->i_lock);
 904        if (new_fl)
 905                locks_free_lock(new_fl);
 906        locks_dispose_list(&dispose);
 907        return error;
 908}
 909
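/*
 * Apply or remove the POSIX lock described by request on inode, merging it
 * with (or splitting) existing locks held by the same owner as needed. If a
 * conflicting lock is found and conflock is non-NULL, a copy of the conflict
 * is returned through it. With FL_SLEEP set, a conflicting request is queued
 * behind the blocker and FILE_LOCK_DEFERRED is returned, unless doing so
 * would create a deadlock, in which case -EDEADLK is returned; without
 * FL_SLEEP a conflict simply yields -EAGAIN.
 */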
 910static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 911{
 912        struct file_lock *fl;
 913        struct file_lock *new_fl = NULL;
 914        struct file_lock *new_fl2 = NULL;
 915        struct file_lock *left = NULL;
 916        struct file_lock *right = NULL;
 917        struct file_lock **before;
 918        int error;
 919        bool added = false;
 920        LIST_HEAD(dispose);
 921
 922        /*
 923         * We may need two file_lock structures for this operation,
 924         * so we get them in advance to avoid races.
 925         *
 926         * In some cases we can be sure, that no new locks will be needed
 927         */
 928        if (!(request->fl_flags & FL_ACCESS) &&
 929            (request->fl_type != F_UNLCK ||
 930             request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
 931                new_fl = locks_alloc_lock();
 932                new_fl2 = locks_alloc_lock();
 933        }
 934
 935        spin_lock(&inode->i_lock);
 936        /*
 937         * New lock request. Walk all POSIX locks and look for conflicts. If
 938         * there are any, either return error or put the request on the
 939         * blocker's list of waiters and the global blocked_hash.
 940         */
 941        if (request->fl_type != F_UNLCK) {
 942                for_each_lock(inode, before) {
 943                        fl = *before;
 944                        if (!IS_POSIX(fl))
 945                                continue;
 946                        if (!posix_locks_conflict(request, fl))
 947                                continue;
 948                        if (conflock)
 949                                locks_copy_conflock(conflock, fl);
 950                        error = -EAGAIN;
 951                        if (!(request->fl_flags & FL_SLEEP))
 952                                goto out;
 953                        /*
 954                         * Deadlock detection and insertion into the blocked
 955                         * locks list must be done while holding the same lock!
 956                         */
 957                        error = -EDEADLK;
 958                        spin_lock(&blocked_lock_lock);
 959                        if (likely(!posix_locks_deadlock(request, fl))) {
 960                                error = FILE_LOCK_DEFERRED;
 961                                __locks_insert_block(fl, request);
 962                        }
 963                        spin_unlock(&blocked_lock_lock);
 964                        goto out;
 965                }
 966        }
 967
 968        /* If we're just looking for a conflict, we're done. */
 969        error = 0;
 970        if (request->fl_flags & FL_ACCESS)
 971                goto out;
 972
 973        /*
 974         * Find the first old lock with the same owner as the new lock.
 975         */
 976        
 977        before = &inode->i_flock;
 978
 979        /* First skip locks owned by other processes.  */
 980        while ((fl = *before) && (!IS_POSIX(fl) ||
 981                                  !posix_same_owner(request, fl))) {
 982                before = &fl->fl_next;
 983        }
 984
 985        /* Process locks with this owner. */
 986        while ((fl = *before) && posix_same_owner(request, fl)) {
 987                /* Detect adjacent or overlapping regions (if same lock type)
 988                 */
 989                if (request->fl_type == fl->fl_type) {
 990                        /* In all comparisons of start vs end, use
 991                         * "start - 1" rather than "end + 1". If end
 992                         * is OFFSET_MAX, end + 1 will become negative.
 993                         */
 994                        if (fl->fl_end < request->fl_start - 1)
 995                                goto next_lock;
 996                        /* If the next lock in the list has entirely bigger
 997                         * addresses than the new one, insert the lock here.
 998                         */
 999                        if (fl->fl_start - 1 > request->fl_end)
1000                                break;
1001
1002                        /* If we come here, the new and old lock are of the
1003                         * same type and adjacent or overlapping. Make one
1004                         * lock yielding from the lower start address of both
1005                         * locks to the higher end address.
1006                         */
1007                        if (fl->fl_start > request->fl_start) {
1008                                gmb();
1009                                fl->fl_start = request->fl_start;
1010                        } else {
1011                                gmb();
1012                                request->fl_start = fl->fl_start;
1013                        }
1014                        if (fl->fl_end < request->fl_end) {
1015                                gmb();
1016                                fl->fl_end = request->fl_end;
1017                        } else {
1018                                gmb();
1019                                request->fl_end = fl->fl_end;
1020                        }
1021                        if (added) {
1022                                locks_delete_lock(before, &dispose);
1023                                continue;
1024                        }
1025                        request = fl;
1026                        added = true;
1027                }
1028                else {
1029                        /* Processing for different lock types is a bit
1030                         * more complex.
1031                         */
1032                        if (fl->fl_end < request->fl_start)
1033                                goto next_lock;
1034                        if (fl->fl_start > request->fl_end)
1035                                break;
1036                        if (request->fl_type == F_UNLCK)
1037                                added = true;
1038                        if (fl->fl_start < request->fl_start)
1039                                left = fl;
1040                        /* If the next lock in the list has a higher end
1041                         * address than the new one, insert the new one here.
1042                         */
1043                        if (fl->fl_end > request->fl_end) {
1044                                right = fl;
1045                                break;
1046                        }
1047                        if (fl->fl_start >= request->fl_start) {
1048                                /* The new lock completely replaces an old
1049                                 * one (This may happen several times).
1050                                 */
1051                                if (added) {
1052                                        locks_delete_lock(before, &dispose);
1053                                        continue;
1054                                }
1055                                /*
1056                                 * Replace the old lock with new_fl, and
1057                                 * remove the old one. It's safe to do the
1058                                 * insert here since we know that we won't be
1059                                 * using new_fl later, and that the lock is
1060                                 * just replacing an existing lock.
1061                                 */
1062                                error = -ENOLCK;
1063                                if (!new_fl)
1064                                        goto out;
1065                                locks_copy_lock(new_fl, request);
1066                                request = new_fl;
1067                                new_fl = NULL;
1068                                locks_delete_lock(before, &dispose);
1069                                locks_insert_lock(before, request);
1070                                added = true;
1071                        }
1072                }
1073                /* Go on to next lock.
1074                 */
1075        next_lock:
1076                before = &fl->fl_next;
1077        }
1078
1079        /*
1080         * The above code only modifies existing locks in case of merging or
1081         * replacing. If new lock(s) need to be inserted all modifications are
1082         * done below this, so it's safe yet to bail out.
1083         */
1084        error = -ENOLCK; /* "no luck" */
1085        if (right && left == right && !new_fl2)
1086                goto out;
1087
1088        error = 0;
1089        if (!added) {
1090                if (request->fl_type == F_UNLCK) {
1091                        if (request->fl_flags & FL_EXISTS)
1092                                error = -ENOENT;
1093                        goto out;
1094                }
1095
1096                if (!new_fl) {
1097                        error = -ENOLCK;
1098                        goto out;
1099                }
1100                locks_copy_lock(new_fl, request);
1101                locks_insert_lock(before, new_fl);
1102                new_fl = NULL;
1103        }
1104        if (right) {
1105                if (left == right) {
1106                        /* The new lock breaks the old one in two pieces,
1107                         * so we have to use the second new lock.
1108                         */
1109                        left = new_fl2;
1110                        new_fl2 = NULL;
1111                        locks_copy_lock(left, right);
1112                        locks_insert_lock(before, left);
1113                }
1114                right->fl_start = request->fl_end + 1;
1115                locks_wake_up_blocks(right);
1116        }
1117        if (left) {
1118                left->fl_end = request->fl_start - 1;
1119                locks_wake_up_blocks(left);
1120        }
1121 out:
1122        spin_unlock(&inode->i_lock);
1123        /*
1124         * Free any unused locks.
1125         */
1126        if (new_fl)
1127                locks_free_lock(new_fl);
1128        if (new_fl2)
1129                locks_free_lock(new_fl2);
1130        locks_dispose_list(&dispose);
1131        return error;
1132}
1133
1134/**
1135 * posix_lock_file - Apply a POSIX-style lock to a file
1136 * @filp: The file to apply the lock to
1137 * @fl: The lock to be applied
1138 * @conflock: Place to return a copy of the conflicting lock, if found.
1139 *
1140 * Add a POSIX style lock to a file.
1141 * We merge adjacent & overlapping locks whenever possible.
1142 * POSIX locks are sorted by owner task, then by starting address
1143 *
1144 * Note that if called with an FL_EXISTS argument, the caller may determine
1145 * whether or not a lock was successfully freed by testing the return
1146 * value for -ENOENT.
1147 */
1148int posix_lock_file(struct file *filp, struct file_lock *fl,
1149                        struct file_lock *conflock)
1150{
1151        return __posix_lock_file(locks_inode(filp), fl, conflock);
1152}
1153EXPORT_SYMBOL(posix_lock_file);
1154
1155/**
1156 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1157 * @inode: inode of file to which lock request should be applied
1158 * @fl: The lock to be applied
1159 *
1160 * Variant of posix_lock_file_wait that does not take a filp, and so can be
1161 * used after the filp has already been torn down.
1162 */
1163int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1164{
1165        int error;
 1166        might_sleep();
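        /*
         * __posix_lock_file() returns FILE_LOCK_DEFERRED when the request has
         * been queued behind a conflicting lock. In that case sleep until the
         * blocker clears our fl_next (i.e. we have been woken up) and retry;
         * if the wait is interrupted, drop out of the block list instead.
         */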
1167        for (;;) {
1168                error = __posix_lock_file(inode, fl, NULL);
1169                if (error != FILE_LOCK_DEFERRED)
1170                        break;
1171                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1172                if (!error)
1173                        continue;
1174
1175                locks_delete_block(fl);
1176                break;
1177        }
1178        return error;
1179}
1180EXPORT_SYMBOL(posix_lock_inode_wait);
1181
1182#ifdef CONFIG_MANDATORY_FILE_LOCKING
1183/**
1184 * locks_mandatory_locked - Check for an active lock
1185 * @file: the file to check
1186 *
1187 * Searches the inode's list of locks to find any POSIX locks which conflict.
1188 * This function is called from locks_verify_locked() only.
1189 */
1190int locks_mandatory_locked(struct file *file)
1191{
1192        struct inode *inode = locks_inode(file);
1193        fl_owner_t owner = current->files;
1194        struct file_lock *fl;
1195
1196        /*
1197         * Search the lock list for this inode for any POSIX locks.
1198         */
1199        spin_lock(&inode->i_lock);
1200        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1201                if (!IS_POSIX(fl))
1202                        continue;
1203                if (fl->fl_owner != owner && fl->fl_owner != (fl_owner_t)file)
1204                        break;
1205        }
1206        spin_unlock(&inode->i_lock);
1207        return fl ? -EAGAIN : 0;
1208}
1209
1210/**
1211 * locks_mandatory_area - Check for a conflicting lock
1212 * @inode:      the file to check
1213 * @filp:       how the file was opened (if it was)
1214 * @start:      first byte in the file to check
 1215 * @end:        last byte in the file to check
1216 * @type:       %F_WRLCK for a write lock, else %F_RDLCK
1217 *
1218 * Searches the inode's list of locks to find any POSIX locks which conflict.
1219 */
1220int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
1221                         loff_t end, unsigned char type)
1222{
1223        struct file_lock fl;
1224        int error;
1225        bool sleep = false;
1226
1227        locks_init_lock(&fl);
1228        fl.fl_pid = current->tgid;
1229        fl.fl_file = filp;
1230        fl.fl_flags = FL_POSIX | FL_ACCESS;
1231        if (filp && !(filp->f_flags & O_NONBLOCK))
1232                sleep = true;
1233        fl.fl_type = type;
1234        fl.fl_start = start;
1235        fl.fl_end = end;
1236
1237        for (;;) {
1238                if (filp) {
1239                        fl.fl_owner = (fl_owner_t)filp;
1240                        fl.fl_flags &= ~FL_SLEEP;
1241                        error = __posix_lock_file(inode, &fl, NULL);
1242                        if (!error)
1243                                break;
1244                }
1245
1246                if (sleep)
1247                        fl.fl_flags |= FL_SLEEP;
1248                fl.fl_owner = current->files;
1249                error = __posix_lock_file(inode, &fl, NULL);
1250                if (error != FILE_LOCK_DEFERRED)
1251                        break;
1252                error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1253                if (!error) {
1254                        /*
1255                         * If we've been sleeping someone might have
1256                         * changed the permissions behind our back.
1257                         */
1258                        if (__mandatory_lock(inode))
1259                                continue;
1260                }
1261
1262                locks_delete_block(&fl);
1263                break;
1264        }
1265
1266        return error;
1267}
1268
1269EXPORT_SYMBOL(locks_mandatory_area);
1270#endif /* CONFIG_MANDATORY_FILE_LOCKING */
1271
1272static void lease_clear_pending(struct file_lock *fl, int arg)
1273{
1274        switch (arg) {
1275        case F_UNLCK:
1276                fl->fl_flags &= ~FL_UNLOCK_PENDING;
1277                /* fall through: */
1278        case F_RDLCK:
1279                fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1280        }
1281}
1282
1283/* We already had a lease on this file; just change its type */
1284int lease_modify(struct file_lock **before, int arg)
1285{
1286        struct file_lock *fl = *before;
1287        int error = assign_type(fl, arg);
1288
1289        if (error)
1290                return error;
1291        lease_clear_pending(fl, arg);
1292        locks_wake_up_blocks(fl);
1293        if (arg == F_UNLCK) {
1294                struct file *filp = fl->fl_file;
1295
1296                f_delown(filp);
1297                filp->f_owner.signum = 0;
1298                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1299                if (fl->fl_fasync != NULL) {
1300                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1301                        fl->fl_fasync = NULL;
1302                }
1303                locks_delete_lock(before, NULL);
1304        }
1305        return 0;
1306}
1307EXPORT_SYMBOL(lease_modify);
1308
1309static bool past_time(unsigned long then)
1310{
1311        if (!then)
1312                /* 0 is a special value meaning "this never expires": */
1313                return false;
1314        return time_after(jiffies, then);
1315}
1316
1317static void time_out_leases(struct inode *inode)
1318{
1319        struct file_lock **before;
1320        struct file_lock *fl;
1321
1322        before = &inode->i_flock;
1323        while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1324                trace_time_out_leases(inode, fl);
1325                if (past_time(fl->fl_downgrade_time))
1326                        lease_modify(before, F_RDLCK);
1327                if (past_time(fl->fl_break_time))
1328                        lease_modify(before, F_UNLCK);
1329                if (fl == *before)      /* lease_modify may have freed fl */
1330                        before = &fl->fl_next;
1331        }
1332}
1333
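/*
 * Does "breaker" need to break "lease"? The breaker and the lease must agree
 * on FL_LAYOUT, a delegation-only breaker (FL_DELEG) never conflicts with an
 * ordinary FL_LEASE lease, and otherwise the usual read/write conflict rules
 * apply.
 */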
1334static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1335{
1336        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1337                return false;
1338        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1339                return false;
1340        return locks_conflict(breaker, lease);
1341}
1342
1343static bool
1344any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1345{
1346        struct file_lock *fl;
1347
1348        lockdep_assert_held(&inode->i_lock);
1349
1350        for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
1351                if (leases_conflict(fl, breaker))
1352                        return true;
1353        }
1354        return false;
1355}
1356
1357/**
1358 *      __break_lease   -       revoke all outstanding leases on file
 1359 *      @inode: the inode of the file whose leases are to be broken
1360 *      @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1361 *          break all leases
1362 *      @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1363 *          only delegations
1364 *
1365 *      break_lease (inlined for speed) has checked there already is at least
1366 *      some kind of lock (maybe a lease) on this file.  Leases are broken on
1367 *      a call to open() or truncate().  This function can sleep unless you
1368 *      specified %O_NONBLOCK to your open().
1369 */
1370int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1371{
1372        int error = 0;
1373        struct file_lock *new_fl;
1374        struct file_lock *fl;
1375        unsigned long break_time;
1376        int want_write = (mode & O_ACCMODE) != O_RDONLY;
1377
1378        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1379        if (IS_ERR(new_fl))
1380                return PTR_ERR(new_fl);
1381        new_fl->fl_flags = type;
1382
1383        spin_lock(&inode->i_lock);
1384
1385        time_out_leases(inode);
1386
1387        if (!any_leases_conflict(inode, new_fl))
1388                goto out;
1389
1390        break_time = 0;
1391        if (lease_break_time > 0) {
1392                break_time = jiffies + lease_break_time * HZ;
1393                if (break_time == 0)
1394                        break_time++;   /* so that 0 means no break time */
1395        }
1396
1397        for (fl = inode->i_flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1398                if (!leases_conflict(fl, new_fl))
1399                        continue;
1400                if (want_write) {
1401                        if (fl->fl_flags & FL_UNLOCK_PENDING)
1402                                continue;
1403                        fl->fl_flags |= FL_UNLOCK_PENDING;
1404                        fl->fl_break_time = break_time;
1405                } else {
1406                        if (lease_breaking(inode->i_flock))
1407                                continue;
1408                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
1409                        fl->fl_downgrade_time = break_time;
1410                }
1411                fl->fl_lmops->lm_break(fl);
1412        }
1413
1414        if (mode & O_NONBLOCK) {
1415                trace_break_lease_noblock(inode, new_fl);
1416                error = -EWOULDBLOCK;
1417                goto out;
1418        }
1419
1420restart:
1421        break_time = inode->i_flock->fl_break_time;
1422        if (break_time != 0)
1423                break_time -= jiffies;
1424        if (break_time == 0)
1425                break_time++;
1426        locks_insert_block(inode->i_flock, new_fl);
1427        trace_break_lease_block(inode, new_fl);
1428        spin_unlock(&inode->i_lock);
1429        error = wait_event_interruptible_timeout(new_fl->fl_wait,
1430                                                !new_fl->fl_next, break_time);
1431        spin_lock(&inode->i_lock);
1432        trace_break_lease_unblock(inode, new_fl);
1433        locks_delete_block(new_fl);
1434        if (error >= 0) {
1435                /*
1436                 * Wait for the next conflicting lease that has not been
1437                 * broken yet
1438                 */
1439                if (error == 0)
1440                        time_out_leases(inode);
1441                if (any_leases_conflict(inode, new_fl))
1442                        goto restart;
1443
1444                error = 0;
1445        }
1446
1447out:
1448        spin_unlock(&inode->i_lock);
1449        locks_free_lock(new_fl);
1450        return error;
1451}
1452
1453EXPORT_SYMBOL(__break_lease);
1454
1455/**
1456 *      lease_get_mtime - get the last modified time of an inode
1457 *      @inode: the inode
1458 *      @time:  pointer to a timespec which will contain the last modified time
1459 *
1460 * This is to force NFS clients to flush their caches for files with
1461 * exclusive leases.  The justification is that if someone has an
1462 * exclusive lease, then they could be modifying it.
1463 */
1464void lease_get_mtime(struct inode *inode, struct timespec *time)
1465{
1466        struct file_lock *flock = inode->i_flock;
1467        if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
1468                *time = current_fs_time(inode->i_sb);
1469        else
1470                *time = inode->i_mtime;
1471}
1472
1473EXPORT_SYMBOL(lease_get_mtime);
1474
1475/**
1476 *      fcntl_getlease - Enquire what lease is currently active
1477 *      @filp: the file
1478 *
1479 *      The value returned by this function will be one of
1480 *      (if no lease break is pending):
1481 *
1482 *      %F_RDLCK to indicate a shared lease is held.
1483 *
1484 *      %F_WRLCK to indicate an exclusive lease is held.
1485 *
1486 *      %F_UNLCK to indicate no lease is held.
1487 *
1488 *      (if a lease break is pending):
1489 *
1490 *      %F_RDLCK to indicate an exclusive lease needs to be
1491 *              changed to a shared lease (or removed).
1492 *
1493 *      %F_UNLCK to indicate the lease needs to be removed.
1494 *
1495 *      XXX: sfr & willy disagree over whether F_INPROGRESS
1496 *      should be returned to userspace.
1497 */
1498int fcntl_getlease(struct file *filp)
1499{
1500        struct file_lock *fl;
1501        struct inode *inode = locks_inode(filp);
1502        int type = F_UNLCK;
1503
1504        spin_lock(&inode->i_lock);
1505        time_out_leases(inode);
1506        for (fl = inode->i_flock; fl && IS_LEASE(fl);
1507                        fl = fl->fl_next) {
1508                if (fl->fl_file == filp) {
1509                        type = target_leasetype(fl);
1510                        break;
1511                }
1512        }
1513        spin_unlock(&inode->i_lock);
1514        return type;
1515}
1516
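From userspace the same information is reached through fcntl(). A minimal userspace sketch (not part of this file; report_lease() is an illustrative name):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

static void report_lease(int fd)
{
	int type = fcntl(fd, F_GETLEASE);	/* handled by fcntl_getlease() */

	switch (type) {
	case F_RDLCK:
		printf("shared lease held (or exclusive lease being downgraded)\n");
		break;
	case F_WRLCK:
		printf("exclusive lease held\n");
		break;
	case F_UNLCK:
		printf("no lease held (or lease being removed)\n");
		break;
	default:
		perror("F_GETLEASE");
	}
}
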
1517/**
1518 * check_conflicting_open - see if the given dentry points to a file that has
1519 *                          an existing open that would conflict with the
1520 *                          desired lease.
1521 * @dentry:     dentry to check
1522 * @arg:        type of lease that we're trying to acquire
1523 *
1524 * Check to see if there's an existing open fd on this file that would
1525 * conflict with the lease we're trying to set.
1526 */
1527static int
1528check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1529{
1530        int ret = 0;
1531        struct inode *inode = dentry->d_inode;
1532
1533        if (flags & FL_LAYOUT)
1534                return 0;
1535
1536        if ((arg == F_RDLCK) &&
1537            (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
1538                return -EAGAIN;
1539
1540        if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1541            (atomic_read(&inode->i_count) > 1)))
1542                ret = -EAGAIN;
1543
1544        return ret;
1545}
1546
1547static int
1548generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1549{
1550        struct file_lock *fl, **before, **my_before = NULL, *lease;
1551        struct dentry *dentry = filp->f_path.dentry;
1552        struct inode *inode = dentry->d_inode;
1553        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1554        int error;
1555
1556        lease = *flp;
1557        trace_generic_add_lease(inode, lease);
1558
1559        /*
1560         * In the delegation case we need mutual exclusion with
1561         * a number of operations that take the i_mutex.  We trylock
1562         * because delegations are an optional optimization; if there's
1563         * some chance of a conflict, we'd rather not bother.  That may
1564         * also be a sign this just isn't a good file to hand out a
1565         * delegation on.
1566         */
1567        if (is_deleg && !mutex_trylock(&inode->i_mutex))
1568                return -EAGAIN;
1569
1570        if (is_deleg && arg == F_WRLCK) {
1571                /* Write delegations are not currently supported: */
1572                WARN_ON_ONCE(1);
1573                return -EINVAL;
1574        }
1575
1576        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1577        if (error)
1578                goto out;
1579
1580        /*
1581         * At this point, we know that if there is an exclusive
1582         * lease on this file, then we hold it on this filp
1583         * (otherwise our open of this file would have blocked).
1584         * And if we are trying to acquire an exclusive lease,
1585         * then the file is not open by anyone (including us)
1586         * except for this filp.
1587         */
1588        error = -EAGAIN;
1589        for (before = &inode->i_flock;
1590                        ((fl = *before) != NULL) && IS_LEASE(fl);
1591                        before = &fl->fl_next) {
1592                if (fl->fl_file == filp &&
1593                        fl->fl_owner == lease->fl_owner) {
1594                        my_before = before;
1595                        continue;
1596                }
1597                /*
1598                 * No exclusive leases if someone else has a lease on
1599                 * this file:
1600                 */
1601                if (arg == F_WRLCK)
1602                        goto out;
1603                /*
1604                 * Modifying our existing lease is OK, but don't grant a
1605                 * new lease if someone else is opening for write:
1606                 */
1607                if (fl->fl_flags & FL_UNLOCK_PENDING)
1608                        goto out;
1609        }
1610
1611        if (my_before != NULL) {
1612                error = lease->fl_lmops->lm_change(my_before, arg);
1613                if (!error)
1614                        *flp = *my_before;
1615                goto out;
1616        }
1617
1618        error = -EINVAL;
1619        if (!leases_enable)
1620                goto out;
1621
1622        locks_insert_lock(before, lease);
1623        /*
1624         * The check in break_lease() is lockless. It's possible for another
1625         * open to race in after we did the earlier check for a conflicting
1626         * open but before the lease was inserted. Check again for a
1627         * conflicting open and cancel the lease if there is one.
1628         *
1629         * We also add a barrier here to ensure that the insertion of the lock
1630         * precedes these checks.
1631         */
1632        smp_mb();
1633        error = check_conflicting_open(dentry, arg, lease->fl_flags);
1634        if (error)
1635                goto out_unlink;
1636out:
1637        if (is_deleg)
1638                mutex_unlock(&inode->i_mutex);
1639        return error;
1640out_unlink:
1641        locks_unlink_lock(before);
1642        goto out;
1643}
1644
1645static int generic_delete_lease(struct file *filp, void *owner)
1646{
1647        int error = -EAGAIN;
1648        struct file_lock *fl, **before;
1649        struct inode *inode = locks_inode(filp);
1650
1651        for (before = &inode->i_flock;
1652                        ((fl = *before) != NULL) && IS_LEASE(fl);
1653                        before = &fl->fl_next) {
1654                if (fl->fl_file == filp && 
1655                        fl->fl_owner == owner)
1656                        break;
1657        }
1658        trace_generic_delete_lease(inode, fl);
1659        if (fl && IS_LEASE(fl))
1660                error = fl->fl_lmops->lm_change(before, F_UNLCK);
1661        return error;
1662}
1663
1664/**
1665 *      generic_setlease        -       sets a lease on an open file
1666 *      @filp: file pointer
1667 *      @arg: type of lease to obtain
1668 *      @flp: input - file_lock to use, output - file_lock inserted
1669 *      @priv: private data for lm_setup
1670 *
1671 *      The (input) flp->fl_lmops->lm_break function is required
1672 *      by break_lease().
1673 *
1674 *      Called with inode->i_lock held.
1675 */
1676int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1677                        void **priv)
1678{
1679        struct inode *inode = locks_inode(filp);
1680        int error;
1681
1682        if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1683                return -EACCES;
1684        if (!S_ISREG(inode->i_mode))
1685                return -EINVAL;
1686        error = security_file_lock(filp, arg);
1687        if (error)
1688                return error;
1689
1690        time_out_leases(inode);
1691
1692        switch (arg) {
1693        case F_UNLCK:
1694                return generic_delete_lease(filp, *priv);
1695        case F_RDLCK:
1696        case F_WRLCK:
1697                if (!(*flp)->fl_lmops->lm_break) {
1698                        WARN_ON_ONCE(1);
1699                        return -ENOLCK;
1700                }
1701                return generic_add_lease(filp, arg, flp, priv);
1702        default:
1703                return -EINVAL;
1704        }
1705}
1706EXPORT_SYMBOL(generic_setlease);
1707
1708static int
1709__vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1710{
1711        if (filp->f_op && filp->f_op->setlease && is_remote_lock(filp))
1712                return filp->f_op->setlease(filp, arg, lease, priv);
1713        else
1714                return generic_setlease(filp, arg, lease, priv);
1715}
1716
1717/**
1718 * vfs_setlease        -       sets a lease on an open file
1719 * @filp: file pointer
1720 * @arg: type of lease to obtain
1721 * @lease: file_lock to use when adding a lease
1722 * @priv: private info for lm_setup when adding a lease
1723 *
1724 * Call this to establish a lease on the file. The "lease" argument is not
1725 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1726 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
1727 * if not, this function will return -ENOLCK (and generate a scary-looking
1728 * stack trace).
1729 */
1730
1731int
1732vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1733{
1734        struct inode *inode = locks_inode(filp);
1735        int error;
1736
1737        spin_lock(&inode->i_lock);
1738        error = __vfs_setlease(filp, arg, lease, priv);
1739        spin_unlock(&inode->i_lock);
1740
1741        return error;
1742}
1743EXPORT_SYMBOL_GPL(vfs_setlease);
1744
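A filesystem that wants non-default lease behaviour hooks ->setlease in its file_operations; __vfs_setlease() above falls back to generic_setlease() when no hook is defined. A minimal sketch of two hypothetical hooks (the examplefs_* names are illustrative):

/* Refuse leases outright, e.g. because the data can change on a remote
 * server behind the kernel's back: */
static int examplefs_no_setlease(struct file *filp, long arg,
				 struct file_lock **flp, void **priv)
{
	return -EINVAL;
}

/* Accept the default VFS semantics by forwarding to the generic code: */
static int examplefs_setlease(struct file *filp, long arg,
			      struct file_lock **flp, void **priv)
{
	return generic_setlease(filp, arg, flp, priv);
}
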
1745static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1746{
1747        struct file_lock *fl, *ret;
1748        struct inode *inode = locks_inode(filp);
1749        struct fasync_struct *new;
1750        int error;
1751
1752        fl = lease_alloc(filp, arg);
1753        if (IS_ERR(fl))
1754                return PTR_ERR(fl);
1755
1756        new = fasync_alloc();
1757        if (!new) {
1758                locks_free_lock(fl);
1759                return -ENOMEM;
1760        }
1761        ret = fl;
1762        spin_lock(&inode->i_lock);
1763        error = __vfs_setlease(filp, arg, &ret, NULL);
1764        if (error) {
1765                spin_unlock(&inode->i_lock);
1766                locks_free_lock(fl);
1767                goto out_free_fasync;
1768        }
1769        if (ret != fl)
1770                locks_free_lock(fl);
1771
1772        /*
1773         * fasync_insert_entry() returns the old entry if any.
1774         * If there was no old entry, then it used 'new' and
1775         * inserted it into the fasync list. Clear new so that
1776         * we don't release it here.
1777         */
1778        if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1779                new = NULL;
1780
1781        error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1782        spin_unlock(&inode->i_lock);
1783
1784out_free_fasync:
1785        if (new)
1786                fasync_free(new);
1787        return error;
1788}
1789
1790/**
1791 *      fcntl_setlease  -       sets a lease on an open file
1792 *      @fd: open file descriptor
1793 *      @filp: file pointer
1794 *      @arg: type of lease to obtain
1795 *
1796 *      Call this fcntl to establish a lease on the file.
1797 *      Note that you also need to call %F_SETSIG to
1798 *      receive a signal when the lease is broken.
1799 */
1800int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1801{
1802        if (arg == F_UNLCK)
1803                return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1804        return do_fcntl_add_lease(fd, filp, arg);
1805}
1806
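A userspace sketch (not part of this file; function names are illustrative) of the sequence described above: select a real-time signal with F_SETSIG so the lease-break notification carries the file descriptor in si_fd, then take a read lease. The signal owner is set by do_fcntl_add_lease() itself, and the holder must unlock or downgrade within /proc/sys/fs/lease-break-time seconds or the kernel breaks the lease on its own.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static void on_lease_break(int sig, siginfo_t *info, void *ctx)
{
	/* info->si_fd names the file whose lease is being broken. */
	fcntl(info->si_fd, F_SETLEASE, F_UNLCK);
}

static int take_read_lease(int fd)
{
	struct sigaction sa = {
		.sa_sigaction	= on_lease_break,
		.sa_flags	= SA_SIGINFO,
	};

	if (sigaction(SIGRTMIN, &sa, NULL) == -1)
		return -1;
	if (fcntl(fd, F_SETSIG, SIGRTMIN) == -1)	/* deliver SIGRTMIN instead of SIGIO */
		return -1;
	return fcntl(fd, F_SETLEASE, F_RDLCK);		/* ends up in fcntl_setlease() */
}
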
1807/**
1808 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1809 * @inode: inode of the file to apply to
1810 * @fl: The lock to be applied
1811 *
1812 * Apply a FLOCK style lock request to an inode.
1813 */
1814int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1815{
1816        int error;
1817        might_sleep();
1818        for (;;) {
1819                error = flock_lock_inode(inode, fl);
1820                if (error != FILE_LOCK_DEFERRED)
1821                        break;
1822                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1823                if (!error)
1824                        continue;
1825
1826                locks_delete_block(fl);
1827                break;
1828        }
1829        return error;
1830}
1831EXPORT_SYMBOL(flock_lock_inode_wait);
1832
1833/**
1834 * locks_lock_inode_wait - Apply a lock to an inode
1835 * @inode: inode of the file to apply to
1836 * @fl: The lock to be applied
1837 *
1838 * Apply a POSIX or FLOCK style lock request to an inode.
1839 */
1840int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1841{
1842        int res = 0;
1843        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1844                case FL_POSIX:
1845                        res = posix_lock_inode_wait(inode, fl);
1846                        break;
1847                case FL_FLOCK:
1848                        res = flock_lock_inode_wait(inode, fl);
1849                        break;
1850                default:
1851                        BUG();
1852        }
1853        return res;
1854}
1855EXPORT_SYMBOL(locks_lock_inode_wait);
1856
1857/**
1858 *      sys_flock: - flock() system call.
1859 *      @fd: the file descriptor to lock.
1860 *      @cmd: the type of lock to apply.
1861 *
1862 *      Apply a %FL_FLOCK style lock to an open file descriptor.
1863 *      The @cmd can be one of
1864 *
1865 *      %LOCK_SH -- a shared lock.
1866 *
1867 *      %LOCK_EX -- an exclusive lock.
1868 *
1869 *      %LOCK_UN -- remove an existing lock.
1870 *
1871 *      %LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1872 *
1873 *      %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1874 *      processes read and write access respectively.
1875 */
1876SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1877{
1878        struct fd f = fdget(fd);
1879        struct file_lock *lock;
1880        int can_sleep, unlock;
1881        int error;
1882
1883        error = -EBADF;
1884        if (!f.file)
1885                goto out;
1886
1887        can_sleep = !(cmd & LOCK_NB);
1888        cmd &= ~LOCK_NB;
1889        unlock = (cmd == LOCK_UN);
1890
1891        if (!unlock && !(cmd & LOCK_MAND) &&
1892            !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1893                goto out_putf;
1894
1895        error = flock_make_lock(f.file, &lock, cmd);
1896        if (error)
1897                goto out_putf;
1898        if (can_sleep)
1899                lock->fl_flags |= FL_SLEEP;
1900
1901        error = security_file_lock(f.file, lock->fl_type);
1902        if (error)
1903                goto out_free;
1904
1905        if (f.file->f_op && f.file->f_op->flock && is_remote_lock(f.file))
1906                error = f.file->f_op->flock(f.file,
1907                                          (can_sleep) ? F_SETLKW : F_SETLK,
1908                                          lock);
1909        else
1910                error = locks_lock_file_wait(f.file, lock);
1911
1912 out_free:
1913        locks_free_lock(lock);
1914
1915 out_putf:
1916        fdput(f);
1917 out:
1918        return error;
1919}
1920
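A userspace sketch of the non-blocking path above (not part of this file; try_exclusive() is an illustrative name): with LOCK_NB the kernel never sets FL_SLEEP, so a conflicting holder makes flock() fail with EWOULDBLOCK instead of sleeping.

#include <sys/file.h>
#include <errno.h>

static int try_exclusive(int fd)
{
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		if (errno == EWOULDBLOCK)
			return 0;	/* someone else holds the lock */
		return -1;		/* real error */
	}
	return 1;			/* acquired; release with flock(fd, LOCK_UN) */
}
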
1921/**
1922 * vfs_test_lock - test file byte range lock
1923 * @filp: The file to test lock for
1924 * @fl: The lock to test; also used to hold result
1925 *
1926 * Returns -ERRNO on failure.  Indicates the presence of a conflicting lock by
1927 * setting fl->fl_type to something other than F_UNLCK.
1928 */
1929int vfs_test_lock(struct file *filp, struct file_lock *fl)
1930{
1931        if (filp->f_op && filp->f_op->lock && is_remote_lock(filp))
1932                return filp->f_op->lock(filp, F_GETLK, fl);
1933        posix_test_lock(filp, fl);
1934        return 0;
1935}
1936EXPORT_SYMBOL_GPL(vfs_test_lock);
1937
1938/**
1939 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
1940 * @fl: The file_lock whose fl_pid should be translated
1941 * @ns: The namespace into which the pid should be translated
1942 *
1943 * Used to translate an fl_pid into a namespace virtual pid number
1944 */
1945static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
1946{
1947        pid_t vnr;
1948        struct pid *pid;
1949
1950        if (IS_OFDLCK(fl))
1951                return -1;
1952        if (IS_REMOTELCK(fl))
1953                return fl->fl_pid;
1954
1955        rcu_read_lock();
1956        pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
1957        vnr = pid_nr_ns(pid, ns);
1958        rcu_read_unlock();
1959        return vnr;
1960}
1961
1962static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1963{
1964        flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
1965#if BITS_PER_LONG == 32
1966        /*
1967         * Make sure we can represent the posix lock via
1968         * legacy 32bit flock.
1969         */
1970        if (fl->fl_start > OFFT_OFFSET_MAX)
1971                return -EOVERFLOW;
1972        if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1973                return -EOVERFLOW;
1974#endif
1975        flock->l_start = fl->fl_start;
1976        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1977                fl->fl_end - fl->fl_start + 1;
1978        flock->l_whence = 0;
1979        flock->l_type = fl->fl_type;
1980        return 0;
1981}
1982
1983#if BITS_PER_LONG == 32
1984static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1985{
1986        flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
1987        flock->l_start = fl->fl_start;
1988        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1989                fl->fl_end - fl->fl_start + 1;
1990        flock->l_whence = 0;
1991        flock->l_type = fl->fl_type;
1992}
1993#endif
1994
1995/* Report the first existing lock that would conflict with @flock.
1996 * This implements the F_GETLK command of fcntl().
1997 */
1998int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
1999{
2000        struct file_lock *fl;
2001        int error;
2002
2003        fl = locks_alloc_lock();
2004        if (fl == NULL)
2005                return -ENOMEM;
2006        error = -EINVAL;
2007        if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2008                goto out;
2009
2010        error = flock_to_posix_lock(filp, fl, flock);
2011        if (error)
2012                goto out;
2013
2014        if (cmd == F_OFD_GETLK) {
2015                error = -EINVAL;
2016                if (flock->l_pid != 0)
2017                        goto out;
2018
2019                cmd = F_GETLK;
2020                fl->fl_flags |= FL_OFDLCK;
2021                fl->fl_owner = (fl_owner_t)filp;
2022        }
2023
2024        error = vfs_test_lock(filp, fl);
2025        if (error)
2026                goto out;
2027 
2028        flock->l_type = fl->fl_type;
2029        if (fl->fl_type != F_UNLCK) {
2030                error = posix_lock_to_flock(flock, fl);
2031                if (error)
2032                        goto out;
2033        }
2034out:
2035        locks_free_lock(fl);
2036        return error;
2037}
2038
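A userspace sketch (not part of this file; probe_range() is an illustrative name) exercising the F_OFD_GETLK branch above: l_pid must be zero on input, and on return l_type is either F_UNLCK or describes the first conflicting lock.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

static void probe_range(int fd)
{
	struct flock fl = {
		.l_type		= F_WRLCK,	/* could we write-lock... */
		.l_whence	= SEEK_SET,
		.l_start	= 0,
		.l_len		= 0,		/* ...the whole file? */
		.l_pid		= 0,		/* required for OFD commands */
	};

	if (fcntl(fd, F_OFD_GETLK, &fl) == -1) {
		perror("F_OFD_GETLK");
		return;
	}
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict: type %d at start %lld, len %lld\n",
		       fl.l_type, (long long)fl.l_start, (long long)fl.l_len);
}
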
2039/**
2040 * vfs_lock_file - file byte range lock
2041 * @filp: The file to apply the lock to
2042 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2043 * @fl: The lock to be applied
2044 * @conf: Place to return a copy of the conflicting lock, if found.
2045 *
2046 * A caller that doesn't care about the conflicting lock may pass NULL
2047 * as the final argument.
2048 *
2049 * If the filesystem defines a private ->lock() method, then @conf will
2050 * be left unchanged; so a caller that cares should initialize it to
2051 * some acceptable default.
2052 *
2053 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2054 * locks, the ->lock() interface may return asynchronously, before the lock has
2055 * been granted or denied by the underlying filesystem, if (and only if)
2056 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2057 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2058 * the request is for a blocking lock. When ->lock() does return asynchronously,
2059 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2060 * request completes.
2061 * If the request is for a non-blocking lock, the filesystem should return
2062 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2063 * with the result. If the request timed out, the callback routine will return
2064 * a nonzero return code and the filesystem should release the lock. The
2065 * filesystem is also responsible for keeping a corresponding posix lock when
2066 * it grants a lock, so the VFS can find out which locks are locally held and
2067 * do the correct lock cleanup when required.
2068 * The underlying filesystem must not drop the kernel lock or call
2069 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2070 * return code.
2071 */
2072int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2073{
2074        if (filp->f_op && filp->f_op->lock && is_remote_lock(filp))
2075                return filp->f_op->lock(filp, cmd, fl);
2076        else
2077                return posix_lock_file(filp, fl, conf);
2078}
2079EXPORT_SYMBOL_GPL(vfs_lock_file);
2080
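A minimal sketch of the asynchronous contract described above, under the assumption of a hypothetical examplefs_queue_lock_rpc() helper: the filesystem's ->lock() method defers blocking requests and completes them later through fl->fl_lmops->lm_grant(), while handling everything else locally.

static int examplefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	/* F_GETLK arrives here via vfs_test_lock(). */
	if (IS_GETLK(cmd)) {
		posix_test_lock(filp, fl);
		return 0;
	}

	/*
	 * Blocking set requests (F_SETLK with FL_SLEEP, as lockd issues
	 * them) are queued to the server; the reply handler later
	 * completes the request via fl->fl_lmops->lm_grant().
	 */
	if (fl->fl_flags & FL_SLEEP) {
		examplefs_queue_lock_rpc(filp, fl);	/* hypothetical */
		return FILE_LOCK_DEFERRED;
	}

	/* Non-blocking set requests are applied locally right away. */
	return posix_lock_file(filp, fl, NULL);
}
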
2081static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2082                             struct file_lock *fl)
2083{
2084        int error;
2085
2086        error = security_file_lock(filp, fl->fl_type);
2087        if (error)
2088                return error;
2089
2090        for (;;) {
2091                error = vfs_lock_file(filp, cmd, fl, NULL);
2092                if (error != FILE_LOCK_DEFERRED)
2093                        break;
2094                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2095                if (!error)
2096                        continue;
2097
2098                locks_delete_block(fl);
2099                break;
2100        }
2101
2102        return error;
2103}
2104
2105/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2106static int
2107check_fmode_for_setlk(struct file_lock *fl)
2108{
2109        switch (fl->fl_type) {
2110        case F_RDLCK:
2111                if (!(fl->fl_file->f_mode & FMODE_READ))
2112                        return -EBADF;
2113                break;
2114        case F_WRLCK:
2115                if (!(fl->fl_file->f_mode & FMODE_WRITE))
2116                        return -EBADF;
2117        }
2118        return 0;
2119}
2120
2121/* Apply the lock described by @flock to an open file descriptor.
2122 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2123 */
2124int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2125                struct flock *flock)
2126{
2127        struct file_lock *file_lock = locks_alloc_lock();
2128        struct inode *inode = locks_inode(filp);
2129        struct file *f;
2130        int error;
2131
2132        if (file_lock == NULL)
2133                return -ENOLCK;
2134
2135        /* Don't allow mandatory locks on files that may be memory mapped
2136         * and shared.
2137         */
2138        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2139                error = -EAGAIN;
2140                goto out;
2141        }
2142
2143        error = flock_to_posix_lock(filp, file_lock, flock);
2144        if (error)
2145                goto out;
2146
2147        error = check_fmode_for_setlk(file_lock);
2148        if (error)
2149                goto out;
2150
2151        /*
2152         * If the cmd is requesting file-private locks, then set the
2153         * FL_OFDLCK flag and override the owner.
2154         */
2155        switch (cmd) {
2156        case F_OFD_SETLK:
2157                error = -EINVAL;
2158                if (flock->l_pid != 0)
2159                        goto out;
2160
2161                cmd = F_SETLK;
2162                file_lock->fl_flags |= FL_OFDLCK;
2163                file_lock->fl_owner = (fl_owner_t)filp;
2164                break;
2165        case F_OFD_SETLKW:
2166                error = -EINVAL;
2167                if (flock->l_pid != 0)
2168                        goto out;
2169
2170                cmd = F_SETLKW;
2171                file_lock->fl_flags |= FL_OFDLCK;
2172                file_lock->fl_owner = (fl_owner_t)filp;
2173                /* Fallthrough */
2174        case F_SETLKW:
2175                file_lock->fl_flags |= FL_SLEEP;
2176        }
2177
2178        error = do_lock_file_wait(filp, cmd, file_lock);
2179
2180        /*
2181         * Attempt to detect a close/fcntl race and recover by releasing the
2182         * lock that was just acquired. There is no need to do that when we're
2183         * unlocking though, or for OFD locks.
2184         */
2185        if (!error && file_lock->fl_type != F_UNLCK &&
2186            !(file_lock->fl_flags & FL_OFDLCK)) {
2187                /*
2188                 * We need that spin_lock here - it prevents reordering between
2189                 * update of i_flctx->flc_posix and check for it done in
2190                 * close(). rcu_read_lock() wouldn't do.
2191                 */
2192                spin_lock(&current->files->file_lock);
2193                f = fcheck(fd);
2194                spin_unlock(&current->files->file_lock);
2195                if (f != filp) {
2196                        file_lock->fl_type = F_UNLCK;
2197                        error = do_lock_file_wait(filp, cmd, file_lock);
2198                        WARN_ON_ONCE(error);
2199                        error = -EBADF;
2200                }
2201        }
2202out:
2203        locks_free_lock(file_lock);
2204        return error;
2205}
2206
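A userspace sketch (not part of this file; lock_whole_file() is an illustrative name) of the F_OFD_SETLKW branch above: the lock is owned by the open file description rather than the process, so closing an unrelated descriptor for the same file does not drop it; it goes away on explicit unlock or when the last descriptor referring to that description is closed, and l_pid must be zero.

#define _GNU_SOURCE
#include <fcntl.h>

static int lock_whole_file(int fd)
{
	struct flock fl = {
		.l_type		= F_WRLCK,
		.l_whence	= SEEK_SET,
		.l_start	= 0,
		.l_len		= 0,	/* whole file */
		.l_pid		= 0,	/* required for OFD commands */
	};

	/* Blocks until granted: F_OFD_SETLKW sets FL_SLEEP above. */
	return fcntl(fd, F_OFD_SETLKW, &fl);
}
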
2207#if BITS_PER_LONG == 32
2208/* Report the first existing lock that would conflict with @flock.
2209 * This implements the F_GETLK command of fcntl().
2210 */
2211int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2212{
2213        struct file_lock *fl;
2214        int error;
2215
2216        fl = locks_alloc_lock();
2217        if (fl == NULL)
2218                return -ENOMEM;
2219
2220        error = -EINVAL;
2221        if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2222                goto out;
2223
2224        error = flock64_to_posix_lock(filp, fl, flock);
2225        if (error)
2226                goto out;
2227
2228        if (cmd == F_OFD_GETLK) {
2229                error = -EINVAL;
2230                if (flock->l_pid != 0)
2231                        goto out;
2232
2233                cmd = F_GETLK64;
2234                fl->fl_flags |= FL_OFDLCK;
2235                fl->fl_owner = (fl_owner_t)filp;
2236        }
2237
2238        error = vfs_test_lock(filp, fl);
2239        if (error)
2240                goto out;
2241
2242        flock->l_type = fl->fl_type;
2243        if (fl->fl_type != F_UNLCK)
2244                posix_lock_to_flock64(flock, fl);
2245
2246out:
2247        locks_free_lock(fl);
2248        return error;
2249}
2250
2251/* Apply the lock described by @flock to an open file descriptor.
2252 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2253 */
2254int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2255                struct flock64 *flock)
2256{
2257        struct file_lock *file_lock = locks_alloc_lock();
2258        struct inode *inode = locks_inode(filp);
2259        struct file *f;
2260        int error;
2261
2262        if (file_lock == NULL)
2263                return -ENOLCK;
2264
2265        /* Don't allow mandatory locks on files that may be memory mapped
2266         * and shared.
2267         */
2268        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2269                error = -EAGAIN;
2270                goto out;
2271        }
2272
2273        error = flock64_to_posix_lock(filp, file_lock, flock);
2274        if (error)
2275                goto out;
2276
2277        error = check_fmode_for_setlk(file_lock);
2278        if (error)
2279                goto out;
2280
2281        /*
2282         * If the cmd is requesting file-private locks, then set the
2283         * FL_OFDLCK flag and override the owner.
2284         */
2285        switch (cmd) {
2286        case F_OFD_SETLK:
2287                error = -EINVAL;
2288                if (flock->l_pid != 0)
2289                        goto out;
2290
2291                cmd = F_SETLK64;
2292                file_lock->fl_flags |= FL_OFDLCK;
2293                file_lock->fl_owner = (fl_owner_t)filp;
2294                break;
2295        case F_OFD_SETLKW:
2296                error = -EINVAL;
2297                if (flock->l_pid != 0)
2298                        goto out;
2299
2300                cmd = F_SETLKW64;
2301                file_lock->fl_flags |= FL_OFDLCK;
2302                file_lock->fl_owner = (fl_owner_t)filp;
2303                /* Fallthrough */
2304        case F_SETLKW64:
2305                file_lock->fl_flags |= FL_SLEEP;
2306        }
2307
2308        error = do_lock_file_wait(filp, cmd, file_lock);
2309
2310        /*
2311         * Attempt to detect a close/fcntl race and recover by releasing the
2312         * lock that was just acquired. There is no need to do that when we're
2313         * unlocking though, or for OFD locks.
2314         */
2315        if (!error && file_lock->fl_type != F_UNLCK &&
2316            !(file_lock->fl_flags & FL_OFDLCK)) {
2317                /*
2318                 * We need that spin_lock here - it prevents reordering between
2319                 * update of i_flctx->flc_posix and check for it done in
2320                 * close(). rcu_read_lock() wouldn't do.
2321                 */
2322                spin_lock(&current->files->file_lock);
2323                f = fcheck(fd);
2324                spin_unlock(&current->files->file_lock);
2325                if (f != filp) {
2326                        file_lock->fl_type = F_UNLCK;
2327                        error = do_lock_file_wait(filp, cmd, file_lock);
2328                        WARN_ON_ONCE(error);
2329                        error = -EBADF;
2330                }
2331        }
2332out:
2333        locks_free_lock(file_lock);
2334        return error;
2335}
2336#endif /* BITS_PER_LONG == 32 */
2337
2338/*
2339 * This function is called when the file is being removed
2340 * from the task's fd array.  POSIX locks belonging to this task
2341 * are deleted at this time.
2342 */
2343void locks_remove_posix(struct file *filp, fl_owner_t owner)
2344{
2345        struct file_lock lock;
2346
2347        /*
2348         * If there are no locks held on this file, we don't need to call
2349         * posix_lock_file().  Another process could be setting a lock on this
2350         * file at the same time, but we wouldn't remove that lock anyway.
2351         */
2352        if (!locks_inode(filp)->i_flock)
2353                return;
2354
2355        lock.fl_type = F_UNLCK;
2356        lock.fl_flags = FL_POSIX | FL_CLOSE;
2357        lock.fl_start = 0;
2358        lock.fl_end = OFFSET_MAX;
2359        lock.fl_owner = owner;
2360        lock.fl_pid = current->tgid;
2361        lock.fl_file = filp;
2362        lock.fl_ops = NULL;
2363        lock.fl_lmops = NULL;
2364
2365        vfs_lock_file(filp, F_SETLK, &lock, NULL);
2366
2367        if (lock.fl_ops && lock.fl_ops->fl_release_private)
2368                lock.fl_ops->fl_release_private(&lock);
2369}
2370
2371EXPORT_SYMBOL(locks_remove_posix);
2372
2373/*
2374 * This function is called on the last close of an open file.
2375 */
2376void locks_remove_file(struct file *filp)
2377{
2378        struct inode * inode = locks_inode(filp);
2379        struct file_lock *fl;
2380        struct file_lock **before;
2381        LIST_HEAD(dispose);
2382
2383        if (!inode->i_flock)
2384                return;
2385
2386        locks_remove_posix(filp, (fl_owner_t)filp);
2387
2388        if (filp->f_op && filp->f_op->flock && is_remote_lock(filp)) {
2389                struct file_lock fl = {
2390                        .fl_pid = current->tgid,
2391                        .fl_file = filp,
2392                        .fl_flags = FL_FLOCK | FL_CLOSE,
2393                        .fl_type = F_UNLCK,
2394                        .fl_end = OFFSET_MAX,
2395                };
2396                filp->f_op->flock(filp, F_SETLKW, &fl);
2397                if (fl.fl_ops && fl.fl_ops->fl_release_private)
2398                        fl.fl_ops->fl_release_private(&fl);
2399        }
2400
2401        spin_lock(&inode->i_lock);
2402        before = &inode->i_flock;
2403
2404        while ((fl = *before) != NULL) {
2405                if (fl->fl_file == filp) {
2406                        if (IS_LEASE(fl)) {
2407                                lease_modify(before, F_UNLCK);
2408                                continue;
2409                        }
2410
2411                        /*
2412                         * There's a leftover lock on the list of a type that
2413                         * we didn't expect to see. Most likely a classic
2414                         * POSIX lock that ended up not getting released
2415                         * properly, or that raced onto the list somehow. Log
2416                         * some info about it and then just remove it from
2417                         * the list.
2418                         */
2419                        WARN(!IS_FLOCK(fl),
2420                                "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
2421                                MAJOR(inode->i_sb->s_dev),
2422                                MINOR(inode->i_sb->s_dev), inode->i_ino,
2423                                fl->fl_type, fl->fl_flags,
2424                                fl->fl_start, fl->fl_end);
2425
2426                        locks_delete_lock(before, &dispose);
2427                        continue;
2428                }
2429                before = &fl->fl_next;
2430        }
2431        spin_unlock(&inode->i_lock);
2432        locks_dispose_list(&dispose);
2433}
2434
2435/**
2436 *      posix_unblock_lock - stop waiting for a file lock
2437 *      @waiter: the lock which was waiting
2438 *
2439 *      lockd needs to block waiting for locks.
2440 */
2441int
2442posix_unblock_lock(struct file_lock *waiter)
2443{
2444        int status = 0;
2445
2446        spin_lock(&blocked_lock_lock);
2447        if (waiter->fl_next)
2448                __locks_delete_block(waiter);
2449        else
2450                status = -ENOENT;
2451        spin_unlock(&blocked_lock_lock);
2452        return status;
2453}
2454EXPORT_SYMBOL(posix_unblock_lock);
2455
2456/**
2457 * vfs_cancel_lock - file byte range unblock lock
2458 * @filp: The file to apply the unblock to
2459 * @fl: The lock to be unblocked
2460 *
2461 * Used by lock managers to cancel blocked requests
2462 */
2463int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2464{
2465        if (filp->f_op && filp->f_op->lock && is_remote_lock(filp))
2466                return filp->f_op->lock(filp, F_CANCELLK, fl);
2467        return 0;
2468}
2469
2470EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2471
2472#ifdef CONFIG_PROC_FS
2473#include <linux/proc_fs.h>
2474#include <linux/seq_file.h>
2475
2476struct locks_iterator {
2477        int     li_cpu;
2478        loff_t  li_pos;
2479};
2480
2481static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2482                            loff_t id, char *pfx)
2483{
2484        struct inode *inode = NULL;
2485        unsigned int fl_pid;
2486        struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2487
2488        fl_pid = locks_translate_pid(fl, proc_pidns);
2489        /*
2490         * If there is no fl_pid, don't display who is waiting on
2491         * the lock when we are called from locks_show; if we are
2492         * called from __show_fd_info, skip the lock entirely
2493         */
2494        if (fl_pid == 0)
2495                return;
2496
2497        if (fl->fl_file != NULL)
2498                inode = locks_inode(fl->fl_file);
2499
2500        seq_printf(f, "%lld:%s ", id, pfx);
2501        if (IS_POSIX(fl)) {
2502                if (fl->fl_flags & FL_ACCESS)
2503                        seq_printf(f, "ACCESS");
2504                else if (IS_OFDLCK(fl))
2505                        seq_printf(f, "OFDLCK");
2506                else
2507                        seq_printf(f, "POSIX ");
2508
2509                seq_printf(f, " %s ",
2510                             (inode == NULL) ? "*NOINODE*" :
2511                             mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2512        } else if (IS_FLOCK(fl)) {
2513                if (fl->fl_type & LOCK_MAND) {
2514                        seq_printf(f, "FLOCK  MSNFS     ");
2515                } else {
2516                        seq_printf(f, "FLOCK  ADVISORY  ");
2517                }
2518        } else if (IS_LEASE(fl)) {
2519                seq_printf(f, "LEASE  ");
2520                if (lease_breaking(fl))
2521                        seq_printf(f, "BREAKING  ");
2522                else if (fl->fl_file)
2523                        seq_printf(f, "ACTIVE    ");
2524                else
2525                        seq_printf(f, "BREAKER   ");
2526        } else {
2527                seq_printf(f, "UNKNOWN UNKNOWN  ");
2528        }
2529        if (fl->fl_type & LOCK_MAND) {
2530                seq_printf(f, "%s ",
2531                               (fl->fl_type & LOCK_READ)
2532                               ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2533                               : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2534        } else {
2535                seq_printf(f, "%s ",
2536                               (lease_breaking(fl))
2537                               ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2538                               : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2539        }
2540        if (inode) {
2541#ifdef WE_CAN_BREAK_LSLK_NOW
2542                seq_printf(f, "%d %s:%ld ", fl_pid,
2543                                inode->i_sb->s_id, inode->i_ino);
2544#else
2545                /* userspace relies on this representation of dev_t ;-( */
2546                seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2547                                MAJOR(inode->i_sb->s_dev),
2548                                MINOR(inode->i_sb->s_dev), inode->i_ino);
2549#endif
2550        } else {
2551                seq_printf(f, "%d <none>:0 ", fl_pid);
2552        }
2553        if (IS_POSIX(fl)) {
2554                if (fl->fl_end == OFFSET_MAX)
2555                        seq_printf(f, "%Ld EOF\n", fl->fl_start);
2556                else
2557                        seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2558        } else {
2559                seq_printf(f, "0 EOF\n");
2560        }
2561}
2562
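For reference, the /proc/locks lines this produces look like the following (values are purely illustrative); waiters queued behind a lock are emitted by locks_show() below with the " ->" prefix:

1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
1: -> POSIX  ADVISORY  WRITE 1235 08:02:131090 0 EOF
2: FLOCK  ADVISORY  WRITE 1236 08:02:131091 0 EOF
3: LEASE  ACTIVE    READ  1237 08:02:131092 0 EOF
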
2563static int locks_show(struct seq_file *f, void *v)
2564{
2565        struct locks_iterator *iter = f->private;
2566        struct file_lock *fl, *bfl;
2567        struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2568
2569        fl = hlist_entry(v, struct file_lock, fl_link);
2570
2571        if (locks_translate_pid(fl, proc_pidns) == 0)
2572                return 0;
2573
2574        lock_get_status(f, fl, iter->li_pos, "");
2575
2576        list_for_each_entry(bfl, &fl->fl_block, fl_block)
2577                lock_get_status(f, bfl, iter->li_pos, " ->");
2578
2579        return 0;
2580}
2581
2582void show_fd_locks(struct seq_file *f,
2583                  struct file *filp, struct files_struct *files)
2584{
2585        struct inode *inode = locks_inode(filp);
2586        struct file_lock **before, *fl;
2587        int id = 0;
2588
2589        spin_lock(&inode->i_lock);
2590        for_each_lock(inode, before) {
2591                fl = *before;
2592
2593                if (filp != fl->fl_file)
2594                        continue;
2595                /*
2596                 * Upstream checks fl->fl_owner != filp here, but RHEL
2597                 * doesn't use ->fl_owner as a "struct file *"; it is NULL
2598                 * if IS_FLOCK().  In that case we rely on the previous
2599                 * check: where upstream sets ->fl_owner = filp, it matches
2600                 * ->fl_file anyway.
2601                 */
2602                if (fl->fl_owner != files &&
2603                    fl->fl_owner != NULL)
2604                        continue;
2605
2606                id++;
2607                seq_puts(f, "lock:\t");
2608                lock_get_status(f, fl, id, "");
2609        }
2610        spin_unlock(&inode->i_lock);
2611}
2612
2613static void *locks_start(struct seq_file *f, loff_t *pos)
2614        __acquires(&blocked_lock_lock)
2615{
2616        struct locks_iterator *iter = f->private;
2617
2618        iter->li_pos = *pos + 1;
2619        lg_global_lock(&file_lock_lglock);
2620        spin_lock(&blocked_lock_lock);
2621        return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
2622}
2623
2624static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2625{
2626        struct locks_iterator *iter = f->private;
2627
2628        ++iter->li_pos;
2629        return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
2630}
2631
2632static void locks_stop(struct seq_file *f, void *v)
2633        __releases(&blocked_lock_lock)
2634{
2635        spin_unlock(&blocked_lock_lock);
2636        lg_global_unlock(&file_lock_lglock);
2637}
2638
2639static const struct seq_operations locks_seq_operations = {
2640        .start  = locks_start,
2641        .next   = locks_next,
2642        .stop   = locks_stop,
2643        .show   = locks_show,
2644};
2645
2646static int locks_open(struct inode *inode, struct file *filp)
2647{
2648        return seq_open_private(filp, &locks_seq_operations,
2649                                        sizeof(struct locks_iterator));
2650}
2651
2652static const struct file_operations proc_locks_operations = {
2653        .open           = locks_open,
2654        .read           = seq_read,
2655        .llseek         = seq_lseek,
2656        .release        = seq_release_private,
2657};
2658
2659static int __init proc_locks_init(void)
2660{
2661        proc_create("locks", 0, NULL, &proc_locks_operations);
2662        return 0;
2663}
2664module_init(proc_locks_init);
2665#endif
2666
2667/**
2668 *      lock_may_read - checks that the region is free of locks
2669 *      @inode: the inode that is being read
2670 *      @start: the first byte to read
2671 *      @len: the number of bytes to read
2672 *
2673 *      Emulates Windows locking requirements.  Whole-file
2674 *      mandatory locks (share modes) can prohibit a read and
2675 *      byte-range POSIX locks can prohibit a read if they overlap.
2676 *
2677 *      N.B. this function is only ever called
2678 *      from knfsd and ownership of locks is never checked.
2679 */
2680int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2681{
2682        struct file_lock *fl;
2683        int result = 1;
2684
2685        spin_lock(&inode->i_lock);
2686        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2687                if (IS_POSIX(fl)) {
2688                        if (fl->fl_type == F_RDLCK)
2689                                continue;
2690                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2691                                continue;
2692                } else if (IS_FLOCK(fl)) {
2693                        if (!(fl->fl_type & LOCK_MAND))
2694                                continue;
2695                        if (fl->fl_type & LOCK_READ)
2696                                continue;
2697                } else
2698                        continue;
2699                result = 0;
2700                break;
2701        }
2702        spin_unlock(&inode->i_lock);
2703        return result;
2704}
2705
2706EXPORT_SYMBOL(lock_may_read);
2707
2708/**
2709 *      lock_may_write - checks that the region is free of locks
2710 *      @inode: the inode that is being written
2711 *      @start: the first byte to write
2712 *      @len: the number of bytes to write
2713 *
2714 *      Emulates Windows locking requirements.  Whole-file
2715 *      mandatory locks (share modes) can prohibit a write and
2716 *      byte-range POSIX locks can prohibit a write if they overlap.
2717 *
2718 *      N.B. this function is only ever called
2719 *      from knfsd and ownership of locks is never checked.
2720 */
2721int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2722{
2723        struct file_lock *fl;
2724        int result = 1;
2725
2726        spin_lock(&inode->i_lock);
2727        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2728                if (IS_POSIX(fl)) {
2729                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2730                                continue;
2731                } else if (IS_FLOCK(fl)) {
2732                        if (!(fl->fl_type & LOCK_MAND))
2733                                continue;
2734                        if (fl->fl_type & LOCK_WRITE)
2735                                continue;
2736                } else
2737                        continue;
2738                result = 0;
2739                break;
2740        }
2741        spin_unlock(&inode->i_lock);
2742        return result;
2743}
2744
2745EXPORT_SYMBOL(lock_may_write);
2746
2747static int __init filelock_init(void)
2748{
2749        int i;
2750
2751        filelock_cache = kmem_cache_create("file_lock_cache",
2752                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2753
2754        lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2755
2756        for_each_possible_cpu(i)
2757                INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2758
2759        return 0;
2760}
2761
2762core_initcall(filelock_init);
2763