linux/kernel/futex.c
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED            0x01
#define FLAGS_CLOCKRT           0x02
#define FLAGS_HAS_TIMEOUT       0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:               priority-sorted list of tasks waiting on this futex
 * @task:               the task waiting on the futex
 * @lock_ptr:           the hash bucket lock
 * @key:                the key the futex is hashed on
 * @pi_state:           optional priority inheritance state
 * @rt_waiter:          rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:     the requeue_pi target futex key
 * @bitset:             bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
        struct plist_node list;

        struct task_struct *task;
        spinlock_t *lock_ptr;
        union futex_key key;
        struct futex_pi_state *pi_state;
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
};

static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me() */
        .key = FUTEX_KEY_INIT,
        .bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32 *)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
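
/*
 * A minimal sketch of the bucket math above (illustrative, not part of
 * the original file): with CONFIG_BASE_SMALL=0, FUTEX_HASHBITS is 8, so
 * the jhash2() result is masked down to one of 256 static buckets:
 *
 *      u32 hash = jhash2(...);                 // e.g. 0x12345678
 *      int idx  = hash & ((1 << 8) - 1);       // 0x78 -> bucket 120
 */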

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1 && key2
                && key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                ihold(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:      virtual address of the futex
 * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:        address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page, *page_head;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * the virtual address, we don't even have to find the underlying vma.
         * Note: we do have to check that 'uaddr' is a valid user address,
         *       but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 1, &page);
        if (err < 0)
                return err;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        page_head = page;
        if (unlikely(PageTail(page))) {
                put_page(page);
                /* serialize against __split_huge_page_splitting() */
                local_irq_disable();
                if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
                        page_head = compound_head(page);
                        /*
                         * page_head is a valid pointer but we must pin
                         * it before taking the PG_lock and/or
                         * PG_compound_lock. The moment we re-enable
                         * irqs __split_huge_page_splitting() can
                         * return and the head page can be freed from
                         * under us. We can't take the PG_lock and/or
                         * PG_compound_lock on a page that could be
                         * freed from under us.
                         */
                        if (page != page_head) {
                                get_page(page_head);
                                put_page(page);
                        }
                        local_irq_enable();
                } else {
                        local_irq_enable();
                        goto again;
                }
        }
#else
        page_head = compound_head(page);
        if (page != page_head) {
                get_page(page_head);
                put_page(page);
        }
#endif

        lock_page(page_head);
        if (!page_head->mapping) {
                unlock_page(page_head);
                put_page(page_head);
                goto again;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page_head)) {
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page_head->mapping->host;
                key->shared.pgoff = page_head->index;
        }

        get_futex_key_refs(key);

        unlock_page(page_head);
        put_page(page_head);
        return 0;
}
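
/*
 * Userspace counterpart of the private/shared split above: a minimal
 * sketch (assuming glibc's syscall(2) wrapper and a kernel that knows
 * FUTEX_PRIVATE_FLAG; futex_wait_private() is a hypothetical helper).
 * Passing FUTEX_WAIT_PRIVATE lets get_futex_key() take the fast
 * (uaddr, mm) path and skip the page pinning entirely:
 *
 *      #include <linux/futex.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int futex_wait_private(int *uaddr, int val)
 *      {
 *              return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE,
 *                             val, NULL, NULL, 0);
 *      }
 */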

static inline void put_futex_key(union futex_key *key)
{
        drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:      pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, (unsigned long)uaddr,
                             1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);

        return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:         the hash bucket the futex_q's reside in
 * @key:        the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                                        union futex_key *key)
{
        struct futex_q *this;

        plist_for_each_entry(this, &hb->chain, list) {
                if (match_futex(&this->key, key))
                        return this;
        }
        return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}
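
/*
 * Both helpers above follow the same pattern: attempt the user access
 * with pagefaults disabled while hb->lock is held, and let the caller
 * drop the lock and fault the page in on failure. A sketch of the
 * typical call-site shape used later in this file:
 *
 *      if (get_futex_value_locked(&curval, uaddr)) {
 *              double_unlock_hb(hb1, hb2);     // drop the bucket locks
 *              ret = get_user(curval, uaddr);  // may fault and sleep
 *              if (!ret)
 *                      goto retry;             // re-take locks, re-check
 *      }
 */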

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                raw_spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                raw_spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p)
                get_task_struct(p);

        rcu_read_unlock();

        return p;
}
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        raw_spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                raw_spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                raw_spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
        }
        raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));

                        /*
                         * When pi_state->owner is NULL then the owner died
                         * and another waiter is on the fly. pi_state->owner
                         * is fixed up by the task which acquires
                         * pi_state->rt_mutex.
                         *
                         * We do not check for pid == 0 which can happen when
                         * the owner died and robust_list_exit() cleared the
                         * TID.
                         */
                        if (pid && pi_state->owner) {
                                /*
                                 * Bail out if user space manipulated the
                                 * futex value.
                                 */
                                if (pid != task_pid_vnr(pi_state->owner))
                                        return -EINVAL;
                        }

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (!p)
                return -ESRCH;

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        raw_spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                raw_spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        raw_spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:              the pi futex user address
 * @hb:                 the pi futex hash bucket
 * @key:                the futex key associated with uaddr and hb
 * @ps:                 the pi_state pointer where we store the result of the
 *                      lookup
 * @task:               the task to perform the atomic lock work for.  This will
 *                      be "current" except in the case of requeue pi.
 * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                union futex_key *key,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
{
        int lock_taken, ret, ownerdied = 0;
        u32 uval, newval, curval;

retry:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = task_pid_vnr(task);
        if (set_waiters)
                newval |= FUTEX_WAITERS;

        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

        if (unlikely(curval == -EFAULT))
                return -EFAULT;

        /*
         * Detect deadlocks.
         */
        if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))
                return -EDEADLK;

        /*
         * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
                return 1;

        uval = curval;

        /*
         * Set the FUTEX_WAITERS flag, so the owner will know it has someone
         * to wake at the next unlock.
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * We take over the futex unconditionally in two cases: the
         * previous owner died (ownerdied was set on an earlier pass),
         * or the futex has no owner at all (the owner TID is 0).
         *
         * This is safe as we are protected by the hash bucket lock!
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
                newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
                ownerdied = 0;
                lock_taken = 1;
        }

        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

        if (unlikely(curval == -EFAULT))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;

        /*
         * We took the lock due to an owner-died takeover.
         */
        if (unlikely(lock_taken))
                return 1;

        /*
         * We don't have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, key, ps);

        if (unlikely(ret)) {
                switch (ret) {
                case -ESRCH:
                        /*
                         * No owner found for this futex. Check if the
                         * OWNER_DIED bit is set to figure out whether
                         * this is a robust futex or not.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                return -EFAULT;

                        /*
                         * We simply start over in case of a robust
                         * futex. The code above will take the futex
                         * and return happy.
                         */
                        if (curval & FUTEX_OWNER_DIED) {
                                ownerdied = 1;
                                goto retry;
                        }
                default:
                        break;
                }
        }

        return ret;
}
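
/*
 * The 0 -> TID cmpxchg above mirrors the userspace fast path of a PI
 * mutex. A minimal sketch (assuming GCC's __sync builtins and glibc's
 * syscall(2) wrapper; pi_lock() is a hypothetical name):
 *
 *      static void pi_lock(int *futex)
 *      {
 *              int tid = syscall(SYS_gettid);
 *
 *              // uncontended: 0 -> TID in userspace, no syscall
 *              if (__sync_bool_compare_and_swap(futex, 0, tid))
 *                      return;
 *              // contended: the kernel sets FUTEX_WAITERS and blocks us
 *              syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *      }
 */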

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        struct task_struct *p = q->task;

        /*
         * We set q->lock_ptr = NULL _before_ we wake up the task. If
         * a non-futex wake up happens on another CPU then the task
         * might exit and p would dereference a non-existing task
         * struct. Prevent this by holding a reference on p across the
         * wake up.
         */
        get_task_struct(p);

        plist_del(&q->list, &q->list.plist);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
         * memory barrier is required here to prevent the following
         * store to lock_ptr from getting ahead of the plist_del.
         */
        smp_wmb();
        q->lock_ptr = NULL;

        wake_up_state(p, TASK_NORMAL);
        put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        /*
         * If current does not own the pi_state then the futex is
         * inconsistent and user space fiddled with the futex value.
         */
        if (pi_state->owner != current)
                return -EINVAL;

        raw_spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * It is possible that the next waiter (the one that brought
         * this owner to the kernel) timed out and is no longer
         * waiting on the lock.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);

        raw_spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        raw_spin_unlock_irq(&new_owner->pi_lock);

        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner-died
         * bit need not be preserved here; we are the owner:
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
}
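
/*
 * double_lock_hb() is the classic deadlock-avoidance idiom: both locks
 * are always taken in a globally consistent (address) order, so two
 * tasks requeueing in opposite directions can never wait on each
 * other. The same idea in generic form (hypothetical helper):
 *
 *      void lock_two(spinlock_t *a, spinlock_t *b)
 *      {
 *              if (a == b)
 *                      spin_lock(a);           // same bucket: lock once
 *              else if (a < b) {
 *                      spin_lock(a);
 *                      spin_lock(b);
 *              } else {
 *                      spin_lock(b);
 *                      spin_lock(a);
 *              }
 *      }
 */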

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state || this->rt_waiter) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
        put_futex_key(&key);
out:
        return ret;
}
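
/*
 * The canonical userspace counterpart of futex_wake(): sleep while a
 * flag is still 0, then set it and wake one waiter. A minimal sketch
 * (assuming glibc's syscall(2) wrapper; the EINTR/EAGAIN handling a
 * real implementation needs is omitted):
 *
 *      // waiter: the kernel re-checks *flag == 0 before sleeping
 *      while (*flag == 0)
 *              syscall(SYS_futex, flag, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *      // waker: set the flag first, then wake at most one waiter
 *      *flag = 1;
 *      syscall(SYS_futex, flag, FUTEX_WAKE, 1, NULL, NULL, 0);
 */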

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret;

retry:
        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry_private:
        double_lock_hb(hb1, hb2);
        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {

                double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                ret = fault_in_user_writeable(uaddr2);
                if (ret)
                        goto out_put_keys;

                if (!(flags & FLAGS_SHARED))
                        goto retry_private;

                put_futex_key(&key2);
                put_futex_key(&key1);
                goto retry;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex(&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        double_unlock_hb(hb1, hb2);
out_put_keys:
        put_futex_key(&key2);
out_put_key1:
        put_futex_key(&key1);
out:
        return ret;
}
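
/*
 * FUTEX_WAKE_OP packs an atomic operation on *uaddr2 into the 'op'
 * argument; futex_atomic_op_inuser() applies it and its boolean result
 * decides whether the uaddr2 waiters are woken too. A sketch using the
 * FUTEX_OP() encoding macro from <linux/futex.h>:
 *
 *      // atomically *uaddr2 ^= 1; additionally wake up to nr_wake2
 *      // waiters on uaddr2 if the old value was equal to 0
 *      int op = FUTEX_OP(FUTEX_OP_XOR, 1, FUTEX_OP_CMP_EQ, 0);
 *
 *      syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake,
 *              (void *)(long)nr_wake2, uaddr2, op);
 */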

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:          the futex_q to requeue
 * @hb1:        the source hash_bucket
 * @hb2:        the target hash_bucket
 * @key2:       the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
                   struct futex_hash_bucket *hb2, union futex_key *key2)
{
        /*
         * If key1 and key2 hash to the same bucket, no need to
         * requeue.
         */
        if (likely(&hb1->chain != &hb2->chain)) {
                plist_del(&q->list, &hb1->chain);
                plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                q->list.plist.spinlock = &hb2->lock;
#endif
        }
        get_futex_key_refs(key2);
        q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:          the futex_q
 * @key:        the key of the requeue target futex
 * @hb:         the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
                           struct futex_hash_bucket *hb)
{
        get_futex_key_refs(key);
        q->key = *key;

        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        WARN_ON(!q->rt_waiter);
        q->rt_waiter = NULL;

        q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.spinlock = &hb->lock;
#endif

        wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:            the user address of the to futex
 * @hb1:                the from futex hash bucket, must be locked by the caller
 * @hb2:                the to futex hash bucket, must be locked by the caller
 * @key1:               the from futex key
 * @key2:               the to futex key
 * @ps:                 address to store the pi_state pointer
 * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
                                 struct futex_hash_bucket *hb1,
                                 struct futex_hash_bucket *hb2,
                                 union futex_key *key1, union futex_key *key2,
                                 struct futex_pi_state **ps, int set_waiters)
{
        struct futex_q *top_waiter = NULL;
        u32 curval;
        int ret;

        if (get_futex_value_locked(&curval, pifutex))
                return -EFAULT;

        /*
         * Find the top_waiter and determine if there are additional waiters.
         * If the caller intends to requeue more than 1 waiter to pifutex,
         * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
         * as we have means to handle the possible fault.  If not, don't set
         * the bit unnecessarily as it will force the subsequent unlock to
         * enter the kernel.
         */
        top_waiter = futex_top_waiter(hb1, key1);

        /* There are no waiters, nothing for us to do. */
        if (!top_waiter)
                return 0;

        /* Ensure we requeue to the expected futex. */
        if (!match_futex(top_waiter->requeue_pi_key, key2))
                return -EINVAL;

        /*
         * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
         * the contended case or if set_waiters is 1.  The pi_state is returned
         * in ps in contended cases.
         */
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   set_waiters);
        if (ret == 1)
                requeue_pi_wake_futex(top_waiter, key2, hb2);

        return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:     source futex user address
 * @flags:      futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:     target futex user address
 * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 * @cmpval:     @uaddr1 expected value (or %NULL)
 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
 *              pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
                         u32 __user *uaddr2, int nr_wake, int nr_requeue,
                         u32 *cmpval, int requeue_pi)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        int drop_count = 0, task_count = 0, ret;
        struct futex_pi_state *pi_state = NULL;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        u32 curval2;

        if (requeue_pi) {
                /*
                 * requeue_pi requires a pi_state, try to allocate it now
                 * without any locks in case it fails.
                 */
                if (refill_pi_state_cache())
                        return -ENOMEM;
                /*
                 * requeue_pi must wake as many tasks as it can, up to nr_wake
                 * + nr_requeue, since it acquires the rt_mutex prior to
                 * returning to userspace, so as to not leave the rt_mutex with
                 * waiters and no owner.  However, second and third wake-ups
                 * cannot be predicted as they involve race conditions with the
                 * first wake and a fault while looking up the pi_state.  Both
                 * pthread_cond_signal() and pthread_cond_broadcast() should
                 * use nr_wake=1.
                 */
                if (nr_wake != 1)
                        return -EINVAL;
        }

retry:
        if (pi_state != NULL) {
                /*
                 * We will have to lookup the pi_state again, so free this one
                 * to keep the accounting correct.
                 */
                free_pi_state(pi_state);
                pi_state = NULL;
        }

        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry_private:
        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        double_unlock_hb(hb1, hb2);

                        ret = get_user(curval, uaddr1);
                        if (ret)
                                goto out_put_keys;

                        if (!(flags & FLAGS_SHARED))
                                goto retry_private;

                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        goto retry;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
                /*
                 * Attempt to acquire uaddr2 and wake the top waiter. If we
                 * intend to requeue waiters, force setting the FUTEX_WAITERS
                 * bit.  We force this here, where we are able to easily handle
                 * faults, rather than in the requeue loop below.
                 */
                ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
                                                 &key2, &pi_state, nr_requeue);

                /*
                 * At this point the top_waiter has either taken uaddr2 or is
                 * waiting on it.  If the former, then the pi_state will not
                 * exist yet, look it up one more time to ensure we have a
                 * reference to it.
                 */
                if (ret == 1) {
                        WARN_ON(pi_state);
                        drop_count++;
                        task_count++;
                        ret = get_futex_value_locked(&curval2, uaddr2);
                        if (!ret)
                                ret = lookup_pi_state(curval2, hb2, &key2,
                                                      &pi_state);
                }

                switch (ret) {
                case 0:
                        break;
                case -EFAULT:
                        double_unlock_hb(hb1, hb2);
                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        ret = fault_in_user_writeable(uaddr2);
                        if (!ret)
                                goto retry;
                        goto out;
                case -EAGAIN:
                        /* The owner was exiting, try again. */
                        double_unlock_hb(hb1, hb2);
                        put_futex_key(&key2);
                        put_futex_key(&key1);
                        cond_resched();
                        goto retry;
                default:
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (task_count - nr_wake >= nr_requeue)
                        break;

                if (!match_futex(&this->key, &key1))
                        continue;

                /*
                 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
                 * be paired with each other and no other futex ops.
                 */
                if ((requeue_pi && !this->rt_waiter) ||
                    (!requeue_pi && this->rt_waiter)) {
                        ret = -EINVAL;
                        break;
                }

                /*
                 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
                 * lock, we already woke the top_waiter.  If not, it will be
                 * woken by futex_unlock_pi().
                 */
                if (++task_count <= nr_wake && !requeue_pi) {
                        wake_futex(this);
                        continue;
                }

                /* Ensure we requeue to the expected futex for requeue_pi. */
                if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
                        ret = -EINVAL;
                        break;
                }

                /*
                 * Requeue nr_requeue waiters and possibly one more in the case
                 * of requeue_pi if we couldn't acquire the lock atomically.
                 */
                if (requeue_pi) {
                        /* Prepare the waiter to take the rt_mutex. */
                        atomic_inc(&pi_state->refcount);
                        this->pi_state = pi_state;
                        ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
                                                        this->rt_waiter,
                                                        this->task, 1);
                        if (ret == 1) {
                                /* We got the lock. */
                                requeue_pi_wake_futex(this, &key2, hb2);
                                drop_count++;
                                continue;
                        } else if (ret) {
                                /* -EDEADLK */
                                this->pi_state = NULL;
                                free_pi_state(pi_state);
                                goto out_unlock;
                        }
                }
                requeue_futex(this, hb1, hb2, &key2);
                drop_count++;
        }

out_unlock:
        double_unlock_hb(hb1, hb2);

        /*
         * drop_futex_key_refs() must be called outside the spinlocks. During
         * the requeue we moved futex_q's from the hash bucket at key1 to the
         * one at key2 and updated their key pointer.  We no longer need to
         * hold the references to key1.
         */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out_put_keys:
        put_futex_key(&key2);
out_put_key1:
        put_futex_key(&key1);
out:
        if (pi_state != NULL)
                free_pi_state(pi_state);
        return ret ? ret : task_count;
}
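
/*
 * The main consumer of this path is glibc's condition variable: a
 * broadcast wakes one waiter and requeues the rest onto the mutex
 * instead of creating a thundering herd. A minimal sketch of the call
 * (assuming glibc's syscall(2) wrapper; cond_seq and mutex are
 * hypothetical userspace futex words):
 *
 *      // wake 1 waiter and requeue the others onto the mutex, but
 *      // only if *cond_seq is still 'seq' (otherwise -EAGAIN: retry)
 *      syscall(SYS_futex, cond_seq, FUTEX_CMP_REQUEUE, 1,
 *              (void *)(long)INT_MAX, mutex, seq);
 */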
1411
1412/* The key must be already stored in q->key. */
1413static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1414        __acquires(&hb->lock)
1415{
1416        struct futex_hash_bucket *hb;
1417
1418        hb = hash_futex(&q->key);
1419        q->lock_ptr = &hb->lock;
1420
1421        spin_lock(&hb->lock);
1422        return hb;
1423}
1424
1425static inline void
1426queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1427        __releases(&hb->lock)
1428{
1429        spin_unlock(&hb->lock);
1430}
1431
1432/**
1433 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1434 * @q:  The futex_q to enqueue
1435 * @hb: The destination hash bucket
1436 *
1437 * The hb->lock must be held by the caller, and is released here. A call to
1438 * queue_me() is typically paired with exactly one call to unqueue_me().  The
1439 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1440 * or nothing if the unqueue is done as part of the wake process and the unqueue
1441 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1442 * an example).
1443 */
1444static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1445        __releases(&hb->lock)
1446{
1447        int prio;
1448
1449        /*
1450         * The priority used to register this element is
1451         * - either the real thread-priority for the real-time threads
1452         * (i.e. threads with a priority lower than MAX_RT_PRIO)
1453         * - or MAX_RT_PRIO for non-RT threads.
1454         * Thus, all RT-threads are woken first in priority order, and
1455         * the others are woken last, in FIFO order.
1456         */
1457        prio = min(current->normal_prio, MAX_RT_PRIO);
1458
1459        plist_node_init(&q->list, prio);
1460#ifdef CONFIG_DEBUG_PI_LIST
1461        q->list.plist.spinlock = &hb->lock;
1462#endif
1463        plist_add(&q->list, &hb->chain);
1464        q->task = current;
1465        spin_unlock(&hb->lock);
1466}
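
/*
 * Worked example of the prio computation above, assuming the usual
 * MAX_RT_PRIO of 100 (lower number = earlier wakeup in the plist): a
 * SCHED_FIFO waiter with normal_prio 10 queues ahead of one with
 * normal_prio 50, while every SCHED_OTHER waiter is clamped to 100,
 * so all RT waiters wake first and non-RT waiters wake in FIFO order
 * among themselves.
 */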

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:  The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}
1519
1520/*
1521 * PI futexes can not be requeued and must remove themself from the
1522 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1523 * and dropped here.
1524 */
1525static void unqueue_me_pi(struct futex_q *q)
1526        __releases(q->lock_ptr)
1527{
1528        WARN_ON(plist_node_empty(&q->list));
1529        plist_del(&q->list, &q->list.plist);
1530
1531        BUG_ON(!q->pi_state);
1532        free_pi_state(q->pi_state);
1533        q->pi_state = NULL;
1534
1535        spin_unlock(q->lock_ptr);
1536}
1537
1538/*
1539 * Fixup the pi_state owner with the new owner.
1540 *
1541 * Must be called with the hash bucket lock held and mm->sem held for
1542 * non-private futexes.
1543 */
1544static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1545                                struct task_struct *newowner)
1546{
1547        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1548        struct futex_pi_state *pi_state = q->pi_state;
1549        struct task_struct *oldowner = pi_state->owner;
1550        u32 uval, curval, newval;
1551        int ret;
1552
1553        /* Owner died? */
1554        if (!pi_state->owner)
1555                newtid |= FUTEX_OWNER_DIED;
1556
1557        /*
1558         * We are here either because we stole the rtmutex from the
1559         * pending owner or we are the pending owner which failed to
1560         * get the rtmutex. We have to replace the pending owner TID
1561         * in the user space variable. This must be atomic as we have
1562         * to preserve the owner died bit here.
1563         *
1564         * Note: We write the user space value _before_ changing the pi_state
1565         * because we can fault here. Imagine swapped out pages or a fork
1566 * that marked all the anonymous memory read-only for COW.
1567         *
1568         * Modifying pi_state _before_ the user space value would
1569         * leave the pi_state in an inconsistent state when we fault
1570         * here, because we need to drop the hash bucket lock to
1571         * handle the fault. This might be observed in the PID check
1572         * in lookup_pi_state.
1573         */
1574retry:
1575        if (get_futex_value_locked(&uval, uaddr))
1576                goto handle_fault;
1577
1578        while (1) {
1579                newval = (uval & FUTEX_OWNER_DIED) | newtid;
1580
1581                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1582
1583                if (curval == -EFAULT)
1584                        goto handle_fault;
1585                if (curval == uval)
1586                        break;
1587                uval = curval;
1588        }
1589
1590        /*
1591         * We fixed up user space. Now we need to fix the pi_state
1592         * itself.
1593         */
1594        if (pi_state->owner != NULL) {
1595                raw_spin_lock_irq(&pi_state->owner->pi_lock);
1596                WARN_ON(list_empty(&pi_state->list));
1597                list_del_init(&pi_state->list);
1598                raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1599        }
1600
1601        pi_state->owner = newowner;
1602
1603        raw_spin_lock_irq(&newowner->pi_lock);
1604        WARN_ON(!list_empty(&pi_state->list));
1605        list_add(&pi_state->list, &newowner->pi_state_list);
1606        raw_spin_unlock_irq(&newowner->pi_lock);
1607        return 0;
1608
1609        /*
1610         * To handle the page fault we need to drop the hash bucket
1611         * lock here. That gives the other task (either the pending
1612         * owner itself or the task which stole the rtmutex) the
1613         * chance to try the fixup of the pi_state. So once we are
1614         * back from handling the fault we need to check the pi_state
1615         * after reacquiring the hash bucket lock and before trying to
1616         * do another fixup. When the fixup has been done already we
1617         * simply return.
1618         */
1619handle_fault:
1620        spin_unlock(q->lock_ptr);
1621
1622        ret = fault_in_user_writeable(uaddr);
1623
1624        spin_lock(q->lock_ptr);
1625
1626        /*
1627         * Check if someone else fixed it for us:
1628         */
1629        if (pi_state->owner != oldowner)
1630                return 0;
1631
1632        if (ret)
1633                return ret;
1634
1635        goto retry;
1636}
1637
1638static long futex_wait_restart(struct restart_block *restart);
1639
1640/**
1641 * fixup_owner() - Post lock pi_state and corner case management
1642 * @uaddr:      user address of the futex
1643 * @q:          futex_q (contains pi_state and access to the rt_mutex)
1644 * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1645 *
1646 * After attempting to lock an rt_mutex, this function is called to cleanup
1647 * the pi_state owner as well as handle race conditions that may allow us to
1648 * acquire the lock. Must be called with the hb lock held.
1649 *
1650 * Returns:
1651 *  1 - success, lock taken
1652 *  0 - success, lock not taken
1653 * <0 - on error (-EFAULT)
1654 */
1655static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1656{
1657        struct task_struct *owner;
1658        int ret = 0;
1659
1660        if (locked) {
1661                /*
1662                 * Got the lock. We might not be the anticipated owner if we
1663                 * did a lock-steal - fix up the PI-state in that case:
1664                 */
1665                if (q->pi_state->owner != current)
1666                        ret = fixup_pi_state_owner(uaddr, q, current);
1667                goto out;
1668        }
1669
1670        /*
1671         * Catch the rare case, where the lock was released when we were on the
1672         * way back before we locked the hash bucket.
1673         */
1674        if (q->pi_state->owner == current) {
1675                /*
1676                 * Try to get the rt_mutex now. This might fail as some other
1677                 * task acquired the rt_mutex after we removed ourself from the
1678                 * rt_mutex waiters list.
1679                 */
1680                if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1681                        locked = 1;
1682                        goto out;
1683                }
1684
1685                /*
1686                 * pi_state is incorrect, some other task did a lock steal and
1687                 * we returned due to timeout or signal without taking the
1688                 * rt_mutex. Too late. We can access the rt_mutex_owner without
1689                 * locking, as the other task is now blocked on the hash bucket
1690                 * lock. Fix the state up.
1691                 */
1692                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1693                ret = fixup_pi_state_owner(uaddr, q, owner);
1694                goto out;
1695        }
1696
1697        /*
1698         * Paranoia check. If we did not take the lock, then we should not be
1699         * the owner, nor the pending owner, of the rt_mutex.
1700         */
1701        if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1702                printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1703                                "pi-state %p\n", ret,
1704                                q->pi_state->pi_mutex.owner,
1705                                q->pi_state->owner);
1706
1707out:
1708        return ret ? ret : locked;
1709}
1710
1711/**
1712 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1713 * @hb:         the futex hash bucket, must be locked by the caller
1714 * @q:          the futex_q to queue up on
1715 * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1716 */
1717static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1718                                struct hrtimer_sleeper *timeout)
1719{
1720        /*
1721         * The task state is guaranteed to be set before another task can
1722         * wake it. set_current_state() is implemented using set_mb() and
1723         * queue_me() calls spin_unlock() upon completion, both serializing
1724         * access to the hash list and forcing another memory barrier.
1725         */
1726        set_current_state(TASK_INTERRUPTIBLE);
1727        queue_me(q, hb);
1728
1729        /* Arm the timer */
1730        if (timeout) {
1731                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1732                if (!hrtimer_active(&timeout->timer))
1733                        timeout->task = NULL;
1734        }
1735
1736        /*
1737         * If we have been removed from the hash list, then another task
1738         * has tried to wake us, and we can skip the call to schedule().
1739         */
1740        if (likely(!plist_node_empty(&q->list))) {
1741                /*
1742                 * If the timer has already expired, current will already be
1743                 * flagged for rescheduling. Only call schedule if there
1744                 * is no timeout, or if it has yet to expire.
1745                 */
1746                if (!timeout || timeout->task)
1747                        schedule();
1748        }
1749        __set_current_state(TASK_RUNNING);
1750}
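
/*
 * The ordering in futex_wait_queue_me() closes a classic lost-wakeup
 * window. If the task state were set only after queue_me() dropped
 * hb->lock (a hypothetical, broken ordering), this interleaving would
 * hang:
 *
 *	waiter					waker
 *	------					-----
 *	queue_me(q, hb);
 *						plist_del(&q->list, ...);
 *						wake_up_state(q->task, ...);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule();
 *
 * The wakeup hits a still-running task and is a no-op, after which the
 * waiter sleeps with nobody left to wake it. Setting the state first
 * guarantees that any waker that finds the futex_q on the hash list also
 * sees the task state already set.
 */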
1751
1752/**
1753 * futex_wait_setup() - Prepare to wait on a futex
1754 * @uaddr:      the futex userspace address
1755 * @val:        the expected value
1756 * @flags:      futex flags (FLAGS_SHARED, etc.)
1757 * @q:          the associated futex_q
1758 * @hb:         storage for hash_bucket pointer to be returned to caller
1759 *
1760 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
1761 * compare it with the expected value.  Handle atomic faults internally.
1762 * Return with the hb lock held and a q.key reference on success, and unlocked
1763 * with no q.key reference on failure.
1764 *
1765 * Returns:
1766 *  0 - uaddr contains val and hb has been locked
1767 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1768 */
1769static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1770                           struct futex_q *q, struct futex_hash_bucket **hb)
1771{
1772        u32 uval;
1773        int ret;
1774
1775        /*
1776         * Access the page AFTER the hash-bucket is locked.
1777         * Order is important:
1778         *
1779         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1780         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1781         *
1782         * The basic logical guarantee of a futex is that it blocks ONLY
1783         * if cond(var) is known to be true at the time of blocking, for
1784         * any cond.  If we queued after testing *uaddr, that would open
1785         * a race condition where we could block indefinitely with
1786         * cond(var) false, which would violate the guarantee.
1787         *
1788         * A consequence is that futex_wait() can return zero and absorb
1789         * a wakeup when *uaddr != val on entry to the syscall.  This is
1790         * rare, but normal.
1791         */
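
        /*
         * For illustration only, the userspace side of that contract looks
         * roughly like this (hypothetical raw-syscall sketch):
         *
         *	waiter:	while ((val = var) != 0)
         *			syscall(SYS_futex, &var, FUTEX_WAIT, val,
         *				NULL, NULL, 0);
         *
         *	waker:	var = 0;
         *		syscall(SYS_futex, &var, FUTEX_WAKE, 1, NULL, NULL, 0);
         *
         * The waiter blocks only if this function still observes var == val
         * under the hash bucket lock, so a wakeup issued between the waiter's
         * load and its syscall is never lost; it merely turns into an
         * -EWOULDBLOCK return and a retry in userspace.
         */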
1792retry:
1793        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
1794        if (unlikely(ret != 0))
1795                return ret;
1796
1797retry_private:
1798        *hb = queue_lock(q);
1799
1800        ret = get_futex_value_locked(&uval, uaddr);
1801
1802        if (ret) {
1803                queue_unlock(q, *hb);
1804
1805                ret = get_user(uval, uaddr);
1806                if (ret)
1807                        goto out;
1808
1809                if (!(flags & FLAGS_SHARED))
1810                        goto retry_private;
1811
1812                put_futex_key(&q->key);
1813                goto retry;
1814        }
1815
1816        if (uval != val) {
1817                queue_unlock(q, *hb);
1818                ret = -EWOULDBLOCK;
1819        }
1820
1821out:
1822        if (ret)
1823                put_futex_key(&q->key);
1824        return ret;
1825}
1826
1827static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1828                      ktime_t *abs_time, u32 bitset)
1829{
1830        struct hrtimer_sleeper timeout, *to = NULL;
1831        struct restart_block *restart;
1832        struct futex_hash_bucket *hb;
1833        struct futex_q q = futex_q_init;
1834        int ret;
1835
1836        if (!bitset)
1837                return -EINVAL;
1838        q.bitset = bitset;
1839
1840        if (abs_time) {
1841                to = &timeout;
1842
1843                hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1844                                      CLOCK_REALTIME : CLOCK_MONOTONIC,
1845                                      HRTIMER_MODE_ABS);
1846                hrtimer_init_sleeper(to, current);
1847                hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1848                                             current->timer_slack_ns);
1849        }
1850
1851retry:
1852        /*
1853         * Prepare to wait on uaddr. On success, holds hb lock and increments
1854         * q.key refs.
1855         */
1856        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1857        if (ret)
1858                goto out;
1859
1860        /* queue_me and wait for wakeup, timeout, or a signal. */
1861        futex_wait_queue_me(hb, &q, to);
1862
1863        /* If we were woken (and unqueued), we succeeded, whatever. */
1864        ret = 0;
1865        /* unqueue_me() drops q.key ref */
1866        if (!unqueue_me(&q))
1867                goto out;
1868        ret = -ETIMEDOUT;
1869        if (to && !to->task)
1870                goto out;
1871
1872        /*
1873         * We expect signal_pending(current), but we might be the
1874         * victim of a spurious wakeup as well.
1875         */
1876        if (!signal_pending(current))
1877                goto retry;
1878
1879        ret = -ERESTARTSYS;
1880        if (!abs_time)
1881                goto out;
1882
1883        restart = &current_thread_info()->restart_block;
1884        restart->fn = futex_wait_restart;
1885        restart->futex.uaddr = uaddr;
1886        restart->futex.val = val;
1887        restart->futex.time = abs_time->tv64;
1888        restart->futex.bitset = bitset;
1889        restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1890
1891        ret = -ERESTART_RESTARTBLOCK;
1892
1893out:
1894        if (to) {
1895                hrtimer_cancel(&to->timer);
1896                destroy_hrtimer_on_stack(&to->timer);
1897        }
1898        return ret;
1899}
1900
1901
1902static long futex_wait_restart(struct restart_block *restart)
1903{
1904        u32 __user *uaddr = restart->futex.uaddr;
1905        ktime_t t, *tp = NULL;
1906
1907        if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1908                t.tv64 = restart->futex.time;
1909                tp = &t;
1910        }
1911        restart->fn = do_no_restart_syscall;
1912
1913        return (long)futex_wait(uaddr, restart->futex.flags,
1914                                restart->futex.val, tp, restart->futex.bitset);
1915}
1916
1917
1918/*
1919 * Userspace tried a 0 -> TID atomic transition of the futex value
1920 * and failed. The kernel side here does the whole locking operation:
1921 * if there are waiters then it will block, it does PI, etc. (Due to
1922 * races the kernel might see a 0 value of the futex too.)
1923 */
1924static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1925                         ktime_t *time, int trylock)
1926{
1927        struct hrtimer_sleeper timeout, *to = NULL;
1928        struct futex_hash_bucket *hb;
1929        struct futex_q q = futex_q_init;
1930        int res, ret;
1931
1932        if (refill_pi_state_cache())
1933                return -ENOMEM;
1934
1935        if (time) {
1936                to = &timeout;
1937                hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1938                                      HRTIMER_MODE_ABS);
1939                hrtimer_init_sleeper(to, current);
1940                hrtimer_set_expires(&to->timer, *time);
1941        }
1942
1943retry:
1944        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
1945        if (unlikely(ret != 0))
1946                goto out;
1947
1948retry_private:
1949        hb = queue_lock(&q);
1950
1951        ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1952        if (unlikely(ret)) {
1953                switch (ret) {
1954                case 1:
1955                        /* We got the lock. */
1956                        ret = 0;
1957                        goto out_unlock_put_key;
1958                case -EFAULT:
1959                        goto uaddr_faulted;
1960                case -EAGAIN:
1961                        /*
1962                         * Task is exiting and we just wait for the
1963                         * exit to complete.
1964                         */
1965                        queue_unlock(&q, hb);
1966                        put_futex_key(&q.key);
1967                        cond_resched();
1968                        goto retry;
1969                default:
1970                        goto out_unlock_put_key;
1971                }
1972        }
1973
1974        /*
1975         * Only actually queue now that the atomic ops are done:
1976         */
1977        queue_me(&q, hb);
1978
1979        WARN_ON(!q.pi_state);
1980        /*
1981         * Block on the PI mutex:
1982         */
1983        if (!trylock)
1984                ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1985        else {
1986                ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1987                /* Fixup the trylock return value: */
1988                ret = ret ? 0 : -EWOULDBLOCK;
1989        }
1990
1991        spin_lock(q.lock_ptr);
1992        /*
1993         * Fixup the pi_state owner and possibly acquire the lock if we
1994         * haven't already.
1995         */
1996        res = fixup_owner(uaddr, &q, !ret);
1997        /*
1998         * If fixup_owner() returned an error, propagate that.  If it acquired
1999         * the lock, clear our -ETIMEDOUT or -EINTR.
2000         */
2001        if (res)
2002                ret = (res < 0) ? res : 0;
2003
2004        /*
2005         * If fixup_owner() faulted and was unable to handle the fault, unlock
2006         * it and return the fault to userspace.
2007         */
2008        if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2009                rt_mutex_unlock(&q.pi_state->pi_mutex);
2010
2011        /* Unqueue and drop the lock */
2012        unqueue_me_pi(&q);
2013
2014        goto out_put_key;
2015
2016out_unlock_put_key:
2017        queue_unlock(&q, hb);
2018
2019out_put_key:
2020        put_futex_key(&q.key);
2021out:
2022        if (to)
2023                destroy_hrtimer_on_stack(&to->timer);
2024        return ret != -EINTR ? ret : -ERESTARTNOINTR;
2025
2026uaddr_faulted:
2027        queue_unlock(&q, hb);
2028
2029        ret = fault_in_user_writeable(uaddr);
2030        if (ret)
2031                goto out_put_key;
2032
2033        if (!(flags & FLAGS_SHARED))
2034                goto retry_private;
2035
2036        put_futex_key(&q.key);
2037        goto retry;
2038}
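
/*
 * The userspace fast path that funnels into futex_lock_pi() is roughly
 * (hypothetical sketch; glibc's PTHREAD_PRIO_INHERIT mutexes do the
 * equivalent):
 *
 *	if (cmpxchg(&futex, 0, gettid()) != 0)
 *		syscall(SYS_futex, &futex, FUTEX_LOCK_PI, 0, timeout,
 *			NULL, 0);
 *
 * Only the contended case enters the kernel, which is why the comment
 * above allows for the kernel seeing a 0 value: the owner may release the
 * futex between the failed cmpxchg and the syscall.
 */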
2039
2040/*
2041 * Userspace attempted a TID -> 0 atomic transition, and failed.
2042 * This is the in-kernel slowpath: we look up the PI state (if any),
2043 * and do the rt-mutex unlock.
2044 */
2045static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2046{
2047        struct futex_hash_bucket *hb;
2048        struct futex_q *this, *next;
2049        u32 uval;
2050        struct plist_head *head;
2051        union futex_key key = FUTEX_KEY_INIT;
2052        int ret;
2053
2054retry:
2055        if (get_user(uval, uaddr))
2056                return -EFAULT;
2057        /*
2058         * We release only a lock we actually own:
2059         */
2060        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
2061                return -EPERM;
2062
2063        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
2064        if (unlikely(ret != 0))
2065                goto out;
2066
2067        hb = hash_futex(&key);
2068        spin_lock(&hb->lock);
2069
2070        /*
2071         * To avoid races, try to do the TID -> 0 atomic transition
2072         * again. If it succeeds then we can return without waking
2073         * anyone else up:
2074         */
2075        if (!(uval & FUTEX_OWNER_DIED))
2076                uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
2077
2078
2079        if (unlikely(uval == -EFAULT))
2080                goto pi_faulted;
2081        /*
2082         * Rare case: we managed to release the lock atomically,
2083         * no need to wake anyone else up:
2084         */
2085        if (unlikely(uval == task_pid_vnr(current)))
2086                goto out_unlock;
2087
2088        /*
2089         * Ok, other tasks may need to be woken up - check waiters
2090         * and do the wakeup if necessary:
2091         */
2092        head = &hb->chain;
2093
2094        plist_for_each_entry_safe(this, next, head, list) {
2095                if (!match_futex(&this->key, &key))
2096                        continue;
2097                ret = wake_futex_pi(uaddr, uval, this);
2098                /*
2099                 * The atomic access to the futex value
2100                 * generated a pagefault, so retry the
2101                 * user-access and the wakeup:
2102                 */
2103                if (ret == -EFAULT)
2104                        goto pi_faulted;
2105                goto out_unlock;
2106        }
2107        /*
2108         * No waiters - kernel unlocks the futex:
2109         */
2110        if (!(uval & FUTEX_OWNER_DIED)) {
2111                ret = unlock_futex_pi(uaddr, uval);
2112                if (ret == -EFAULT)
2113                        goto pi_faulted;
2114        }
2115
2116out_unlock:
2117        spin_unlock(&hb->lock);
2118        put_futex_key(&key);
2119
2120out:
2121        return ret;
2122
2123pi_faulted:
2124        spin_unlock(&hb->lock);
2125        put_futex_key(&key);
2126
2127        ret = fault_in_user_writeable(uaddr);
2128        if (!ret)
2129                goto retry;
2130
2131        return ret;
2132}
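
/*
 * Correspondingly, the userspace unlock fast path is roughly (hypothetical
 * sketch):
 *
 *	tid = gettid();
 *	if (cmpxchg(&futex, tid, 0) != tid)
 *		syscall(SYS_futex, &futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *
 * The cmpxchg fails when FUTEX_WAITERS is set, i.e. exactly when there may
 * be a waiter to wake and PI state to hand off.
 */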
2133
2134/**
2135 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2136 * @hb:         the hash_bucket futex_q was originally enqueued on
2137 * @q:          the futex_q woken while waiting to be requeued
2138 * @key2:       the futex_key of the requeue target futex
2139 * @timeout:    the timeout associated with the wait (NULL if none)
2140 *
2141 * Detect if the task was woken on the initial futex as opposed to the requeue
2142 * target futex.  If so, determine if it was a timeout or a signal that caused
2143 * the wakeup and return the appropriate error code to the caller.  Must be
2144 * called with the hb lock held.
2145 *
2146 * Returns:
2147 *  0 - no early wakeup detected
2148 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2149 */
2150static inline
2151int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2152                                   struct futex_q *q, union futex_key *key2,
2153                                   struct hrtimer_sleeper *timeout)
2154{
2155        int ret = 0;
2156
2157        /*
2158         * With the hb lock held, we avoid races while we process the wakeup.
2159         * We only need to hold hb (and not hb2) to ensure atomicity as the
2160         * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2161         * It can't be requeued from uaddr2 to something else since we don't
2162         * support a PI aware source futex for requeue.
2163         */
2164        if (!match_futex(&q->key, key2)) {
2165                WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2166                /*
2167                 * We were woken prior to requeue by a timeout or a signal.
2168                 * Unqueue the futex_q and determine which it was.
2169                 */
2170                plist_del(&q->list, &q->list.plist);
2171
2172                /* Handle spurious wakeups gracefully */
2173                ret = -EWOULDBLOCK;
2174                if (timeout && !timeout->task)
2175                        ret = -ETIMEDOUT;
2176                else if (signal_pending(current))
2177                        ret = -ERESTARTNOINTR;
2178        }
2179        return ret;
2180}
2181
2182/**
2183 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2184 * @uaddr:      the futex we initially wait on (non-pi)
2185 * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); uaddr and
2186 *              uaddr2 must be of the same type (no requeueing from private to shared, etc.)
2187 * @val:        the expected value of uaddr
2188 * @abs_time:   absolute timeout
2189 * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2191 * @uaddr2:     the pi futex we will take prior to returning to user-space
2192 *
2193 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2194 * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
2195 * complete the acquisition of the rt_mutex prior to returning to userspace.
2196 * This ensures the rt_mutex maintains an owner when it has waiters; without
2197 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2198 * need to.
2199 *
2200 * We call schedule() in futex_wait_queue_me() when we enqueue and return there
2201 * via the following:
2202 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2203 * 2) wakeup on uaddr2 after a requeue
2204 * 3) signal
2205 * 4) timeout
2206 *
2207 * If 3, cleanup and return -ERESTARTNOINTR.
2208 *
2209 * If 2, we may then block on trying to take the rt_mutex and return via:
2210 * 5) successful lock
2211 * 6) signal
2212 * 7) timeout
2213 * 8) other lock acquisition failure
2214 *
2215 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2216 *
2217 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2218 *
2219 * Returns:
2220 *  0 - On success
2221 * <0 - On error
2222 */
2223static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2224                                 u32 val, ktime_t *abs_time, u32 bitset,
2225                                 u32 __user *uaddr2)
2226{
2227        struct hrtimer_sleeper timeout, *to = NULL;
2228        struct rt_mutex_waiter rt_waiter;
2229        struct rt_mutex *pi_mutex = NULL;
2230        struct futex_hash_bucket *hb;
2231        union futex_key key2 = FUTEX_KEY_INIT;
2232        struct futex_q q = futex_q_init;
2233        int res, ret;
2234
2235        if (!bitset)
2236                return -EINVAL;
2237
2238        if (abs_time) {
2239                to = &timeout;
2240                hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2241                                      CLOCK_REALTIME : CLOCK_MONOTONIC,
2242                                      HRTIMER_MODE_ABS);
2243                hrtimer_init_sleeper(to, current);
2244                hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2245                                             current->timer_slack_ns);
2246        }
2247
2248        /*
2249         * The waiter is allocated on our stack, manipulated by the requeue
2250         * code while we sleep on uaddr.
2251         */
2252        debug_rt_mutex_init_waiter(&rt_waiter);
2253        rt_waiter.task = NULL;
2254
2255        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
2256        if (unlikely(ret != 0))
2257                goto out;
2258
2259        q.bitset = bitset;
2260        q.rt_waiter = &rt_waiter;
2261        q.requeue_pi_key = &key2;
2262
2263        /*
2264         * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2265         * count.
2266         */
2267        ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2268        if (ret)
2269                goto out_key2;
2270
2271        /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2272        futex_wait_queue_me(hb, &q, to);
2273
2274        spin_lock(&hb->lock);
2275        ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2276        spin_unlock(&hb->lock);
2277        if (ret)
2278                goto out_put_keys;
2279
2280        /*
2281         * In order for us to be here, we know our q.key == key2, and since
2282         * we took the hb->lock above, we also know that futex_requeue() has
2283         * completed and we no longer have to concern ourselves with a wakeup
2284         * race with the atomic proxy lock acquisition by the requeue code. The
2285         * futex_requeue dropped our key1 reference and incremented our key2
2286         * reference count.
2287         */
2288
2289        /* Check if the requeue code acquired the second futex for us. */
2290        if (!q.rt_waiter) {
2291                /*
2292                 * Got the lock. We might not be the anticipated owner if we
2293                 * did a lock-steal - fix up the PI-state in that case.
2294                 */
2295                if (q.pi_state && (q.pi_state->owner != current)) {
2296                        spin_lock(q.lock_ptr);
2297                        ret = fixup_pi_state_owner(uaddr2, &q, current);
2298                        spin_unlock(q.lock_ptr);
2299                }
2300        } else {
2301                /*
2302                 * We have been woken up by futex_unlock_pi(), a timeout, or a
2303                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2304                 * the pi_state.
2305                 */
2306                WARN_ON(!q.pi_state);
2307                pi_mutex = &q.pi_state->pi_mutex;
2308                ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2309                debug_rt_mutex_free_waiter(&rt_waiter);
2310
2311                spin_lock(q.lock_ptr);
2312                /*
2313                 * Fixup the pi_state owner and possibly acquire the lock if we
2314                 * haven't already.
2315                 */
2316                res = fixup_owner(uaddr2, &q, !ret);
2317                /*
2318                 * If fixup_owner() returned an error, propagate that.  If it
2319                 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2320                 */
2321                if (res)
2322                        ret = (res < 0) ? res : 0;
2323
2324                /* Unqueue and drop the lock. */
2325                unqueue_me_pi(&q);
2326        }
2327
2328        /*
2329         * If fixup_pi_state_owner() faulted and was unable to handle the
2330         * fault, unlock the rt_mutex and return the fault to userspace.
2331         */
2332        if (ret == -EFAULT) {
2333                if (rt_mutex_owner(pi_mutex) == current)
2334                        rt_mutex_unlock(pi_mutex);
2335        } else if (ret == -EINTR) {
2336                /*
2337                 * We've already been requeued, but cannot restart by calling
2338                 * futex_lock_pi() directly. We could restart this syscall, but
2339                 * it would detect that the user space "val" changed and return
2340                 * -EWOULDBLOCK.  Save the overhead of the restart and return
2341                 * -EWOULDBLOCK directly.
2342                 */
2343                ret = -EWOULDBLOCK;
2344        }
2345
2346out_put_keys:
2347        put_futex_key(&q.key);
2348out_key2:
2349        put_futex_key(&key2);
2350
2351out:
2352        if (to) {
2353                hrtimer_cancel(&to->timer);
2354                destroy_hrtimer_on_stack(&to->timer);
2355        }
2356        return ret;
2357}
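
/*
 * The intended caller of futex_wait_requeue_pi() is a PI-aware condition
 * variable. A rough, hypothetical sketch of the userspace side (glibc's
 * pthread_cond_wait() with a PTHREAD_PRIO_INHERIT mutex is the real
 * consumer):
 *
 *	val = cond->futex;
 *	pi_mutex_unlock(mutex);
 *	syscall(SYS_futex, &cond->futex, FUTEX_WAIT_REQUEUE_PI, val,
 *		timeout, &mutex->futex, 0);
 *
 * The signaler then uses FUTEX_CMP_REQUEUE_PI to move the waiters from
 * cond->futex over to mutex->futex, waking at most one of them straight
 * into the rt_mutex acquisition handled above.
 */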
2358
2359/*
2360 * Support for robust futexes: the kernel cleans up held futexes at
2361 * thread exit time.
2362 *
2363 * Implementation: user-space maintains a per-thread list of locks it
2364 * is holding. Upon do_exit(), the kernel carefully walks this list,
2365 * and marks all locks that are owned by this thread with the
2366 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2367 * always manipulated with the lock held, so the list is private and
2368 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2369 * field, to allow the kernel to clean up if the thread dies after
2370 * acquiring the lock, but just before it could have added itself to
2371 * the list. There can only be one such pending lock.
2372 */
2373
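/*
 * A minimal, hypothetical userspace registration of such a list:
 *
 *	struct my_lock {
 *		struct robust_list list;
 *		int futex;
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },
 *		.futex_offset	 = offsetof(struct my_lock, futex) -
 *				   offsetof(struct my_lock, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each lock the thread acquires is then linked into head.list, with
 * list_op_pending set across the non-atomic acquire-then-link window.
 */
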
2374/**
2375 * sys_set_robust_list() - Set the robust-futex list head of a task
2376 * @head:       pointer to the list-head
2377 * @len:        length of the list-head, as userspace expects
2378 */
2379SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2380                size_t, len)
2381{
2382        if (!futex_cmpxchg_enabled)
2383                return -ENOSYS;
2384        /*
2385         * The kernel knows only one size for now:
2386         */
2387        if (unlikely(len != sizeof(*head)))
2388                return -EINVAL;
2389
2390        current->robust_list = head;
2391
2392        return 0;
2393}
2394
2395/**
2396 * sys_get_robust_list() - Get the robust-futex list head of a task
2397 * @pid:        pid of the process [zero for current task]
2398 * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2399 * @len_ptr:    pointer to a length field, the kernel fills in the header size
2400 */
2401SYSCALL_DEFINE3(get_robust_list, int, pid,
2402                struct robust_list_head __user * __user *, head_ptr,
2403                size_t __user *, len_ptr)
2404{
2405        struct robust_list_head __user *head;
2406        unsigned long ret;
2407        const struct cred *cred = current_cred(), *pcred;
2408
2409        if (!futex_cmpxchg_enabled)
2410                return -ENOSYS;
2411
2412        if (!pid)
2413                head = current->robust_list;
2414        else {
2415                struct task_struct *p;
2416
2417                ret = -ESRCH;
2418                rcu_read_lock();
2419                p = find_task_by_vpid(pid);
2420                if (!p)
2421                        goto err_unlock;
2422                ret = -EPERM;
2423                pcred = __task_cred(p);
2424                if (cred->euid != pcred->euid &&
2425                    cred->euid != pcred->uid &&
2426                    !capable(CAP_SYS_PTRACE))
2427                        goto err_unlock;
2428                head = p->robust_list;
2429                rcu_read_unlock();
2430        }
2431
2432        if (put_user(sizeof(*head), len_ptr))
2433                return -EFAULT;
2434        return put_user(head, head_ptr);
2435
2436err_unlock:
2437        rcu_read_unlock();
2438
2439        return ret;
2440}
2441
2442/*
2443 * Process a futex-list entry, check whether it's owned by the
2444 * dying task, and do notification if so:
2445 */
2446int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2447{
2448        u32 uval, nval, mval;
2449
2450retry:
2451        if (get_user(uval, uaddr))
2452                return -1;
2453
2454        if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2455                /*
2456                 * Ok, this dying thread is truly holding a futex
2457                 * of interest. Set the OWNER_DIED bit atomically
2458                 * via cmpxchg, and if the value had FUTEX_WAITERS
2459                 * set, wake up a waiter (if any). (We have to do a
2460                 * futex_wake() even if OWNER_DIED is already set -
2461                 * to handle the rare but possible case of recursive
2462                 * thread-death.) The rest of the cleanup is done in
2463                 * userspace.
2464                 */
2465                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2466                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2467
2468                if (nval == -EFAULT)
2469                        return -1;
2470
2471                if (nval != uval)
2472                        goto retry;
2473
2474                /*
2475                 * Wake robust non-PI futexes here. The wakeup of
2476                 * PI futexes happens in exit_pi_state():
2477                 */
2478                if (!pi && (uval & FUTEX_WAITERS))
2479                        futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2480        }
2481        return 0;
2482}
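
/*
 * On its next lock attempt, userspace observes the bit set above and
 * surfaces it to the application, roughly (hypothetical sketch; glibc's
 * robust mutexes do the equivalent and return EOWNERDEAD):
 *
 *	oldval = futex;
 *	if ((oldval & FUTEX_OWNER_DIED) &&
 *	    cmpxchg(&futex, oldval,
 *		    tid | (oldval & FUTEX_WAITERS)) == oldval)
 *		return EOWNERDEAD;
 */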
2483
2484/*
2485 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2486 */
2487static inline int fetch_robust_entry(struct robust_list __user **entry,
2488                                     struct robust_list __user * __user *head,
2489                                     unsigned int *pi)
2490{
2491        unsigned long uentry;
2492
2493        if (get_user(uentry, (unsigned long __user *)head))
2494                return -EFAULT;
2495
2496        *entry = (void __user *)(uentry & ~1UL);
2497        *pi = uentry & 1;
2498
2499        return 0;
2500}
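
/*
 * Userspace sets that bit when it links a PI lock into the list, e.g.
 * (hypothetical):
 *
 *	entry->next = (struct robust_list *)
 *			((unsigned long)next | (is_pi ? 1UL : 0UL));
 */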
2501
2502/*
2503 * Walk curr->robust_list (very carefully, it's a userspace list!)
2504 * and mark any locks found there dead, and notify any waiters.
2505 *
2506 * We silently return on any sign of a list-walking problem.
2507 */
2508void exit_robust_list(struct task_struct *curr)
2509{
2510        struct robust_list_head __user *head = curr->robust_list;
2511        struct robust_list __user *entry, *next_entry, *pending;
2512        unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2513        unsigned int uninitialized_var(next_pi);
2514        unsigned long futex_offset;
2515        int rc;
2516
2517        if (!futex_cmpxchg_enabled)
2518                return;
2519
2520        /*
2521         * Fetch the list head (which was registered earlier, via
2522         * sys_set_robust_list()):
2523         */
2524        if (fetch_robust_entry(&entry, &head->list.next, &pi))
2525                return;
2526        /*
2527         * Fetch the relative futex offset:
2528         */
2529        if (get_user(futex_offset, &head->futex_offset))
2530                return;
2531        /*
2532         * Fetch any possibly pending lock-add first, and handle it
2533         * if it exists:
2534         */
2535        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2536                return;
2537
2538        next_entry = NULL;      /* avoid warning with gcc */
2539        while (entry != &head->list) {
2540                /*
2541                 * Fetch the next entry in the list before calling
2542                 * handle_futex_death:
2543                 */
2544                rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2545                /*
2546                 * A pending lock might already be on the list, so
2547                 * don't process it twice:
2548                 */
2549                if (entry != pending)
2550                        if (handle_futex_death((void __user *)entry + futex_offset,
2551                                                curr, pi))
2552                                return;
2553                if (rc)
2554                        return;
2555                entry = next_entry;
2556                pi = next_pi;
2557                /*
2558                 * Avoid excessively long or circular lists:
2559                 */
2560                if (!--limit)
2561                        break;
2562
2563                cond_resched();
2564        }
2565
2566        if (pending)
2567                handle_futex_death((void __user *)pending + futex_offset,
2568                                   curr, pip);
2569}
2570
2571long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2572                u32 __user *uaddr2, u32 val2, u32 val3)
2573{
2574        int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK;
2575        unsigned int flags = 0;
2576
2577        if (!(op & FUTEX_PRIVATE_FLAG))
2578                flags |= FLAGS_SHARED;
2579
2580        if (op & FUTEX_CLOCK_REALTIME) {
2581                flags |= FLAGS_CLOCKRT;
2582                if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2583                        return -ENOSYS;
2584        }
2585
2586        switch (cmd) {
2587        case FUTEX_WAIT:
2588                val3 = FUTEX_BITSET_MATCH_ANY;
2589        case FUTEX_WAIT_BITSET:
2590                ret = futex_wait(uaddr, flags, val, timeout, val3);
2591                break;
2592        case FUTEX_WAKE:
2593                val3 = FUTEX_BITSET_MATCH_ANY;
2594        case FUTEX_WAKE_BITSET:
2595                ret = futex_wake(uaddr, flags, val, val3);
2596                break;
2597        case FUTEX_REQUEUE:
2598                ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2599                break;
2600        case FUTEX_CMP_REQUEUE:
2601                ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2602                break;
2603        case FUTEX_WAKE_OP:
2604                ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2605                break;
2606        case FUTEX_LOCK_PI:
2607                if (futex_cmpxchg_enabled)
2608                        ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
2609                break;
2610        case FUTEX_UNLOCK_PI:
2611                if (futex_cmpxchg_enabled)
2612                        ret = futex_unlock_pi(uaddr, flags);
2613                break;
2614        case FUTEX_TRYLOCK_PI:
2615                if (futex_cmpxchg_enabled)
2616                        ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
2617                break;
2618        case FUTEX_WAIT_REQUEUE_PI:
2619                val3 = FUTEX_BITSET_MATCH_ANY;
2620                ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2621                                            uaddr2);
2622                break;
2623        case FUTEX_CMP_REQUEUE_PI:
2624                ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2625                break;
2626        default:
2627                ret = -ENOSYS;
2628        }
2629        return ret;
2630}
2631
2632
2633SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2634                struct timespec __user *, utime, u32 __user *, uaddr2,
2635                u32, val3)
2636{
2637        struct timespec ts;
2638        ktime_t t, *tp = NULL;
2639        u32 val2 = 0;
2640        int cmd = op & FUTEX_CMD_MASK;
2641
2642        if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2643                      cmd == FUTEX_WAIT_BITSET ||
2644                      cmd == FUTEX_WAIT_REQUEUE_PI)) {
2645                if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2646                        return -EFAULT;
2647                if (!timespec_valid(&ts))
2648                        return -EINVAL;
2649
2650                t = timespec_to_ktime(ts);
2651                if (cmd == FUTEX_WAIT)
2652                        t = ktime_add_safe(ktime_get(), t);
2653                tp = &t;
2654        }
2655        /*
2656         * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2657         * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2658         */
2659        if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2660            cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2661                val2 = (u32) (unsigned long) utime;
2662
2663        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2664}
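
/*
 * For example, a hypothetical raw FUTEX_CMP_REQUEUE call, as a condvar
 * broadcast would issue it: wake one waiter, requeue the rest onto the
 * mutex, and pass nr_requeue through the utime slot:
 *
 *	syscall(SYS_futex, &cond_futex, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex_futex, cond_val);
 */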
2665
2666static int __init futex_init(void)
2667{
2668        u32 curval;
2669        int i;
2670
2671        /*
2672         * This will fail and we want it. Some arch implementations do
2673         * runtime detection of the futex_atomic_cmpxchg_inatomic()
2674         * functionality. We want to know that before we call in any
2675         * of the complex code paths. Also we want to prevent
2676         * registration of robust lists in that case. NULL is
2677         * guaranteed to fault and we get -EFAULT on functional
2678         * implementation, the non-functional ones will return
2679         * -ENOSYS.
2680         */
2681        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2682        if (curval == -EFAULT)
2683                futex_cmpxchg_enabled = 1;
2684
2685        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2686                plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2687                spin_lock_init(&futex_queues[i].lock);
2688        }
2689
2690        return 0;
2691}
2692__initcall(futex_init);
2693