/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
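
/*
 * Illustrative sketch (not part of the driver): with the ordering above, a
 * path that needs all three locks must take them outer-to-inner, e.g. for a
 * ref whose target node lives in the same proc:
 *
 *      binder_proc_lock(proc);          // 1) proc->outer_lock
 *      binder_node_lock(ref->node);     // 2) node->lock
 *      binder_inner_proc_lock(proc);    // 3) proc->inner_lock
 *      ...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(ref->node);
 *      binder_proc_unlock(proc);
 *
 * binder_node_inner_lock() below bundles steps 2 and 3 for the common case
 * where only the node and inner locks are needed.
 */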

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
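
/*
 * Because debug_mask is registered with mode 0644, the mask can also be
 * changed at runtime by root via /sys/module/binder/parameters/debug_mask,
 * e.g. writing 0xffff there enables every debug class above.
 */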

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
        param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info_ratelimited(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
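
/*
 * A sketch of the reader side that the smp_wmb() above pairs with (the
 * helper below is hypothetical): a consumer samples debug_id_done around
 * copying out an entry, with smp_rmb() ordering the loads, so an entry that
 * was concurrently rewritten can be detected and flagged as unstable.
 *
 *      static bool log_entry_stable(struct binder_transaction_log_entry *e)
 *      {
 *              int done = READ_ONCE(e->debug_id_done);
 *
 *              smp_rmb();      // pairs with smp_wmb() in ..._log_add()
 *              ...read/print the entry fields...
 *              return done && done == READ_ONCE(e->debug_id_done);
 *      }
 */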

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};
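
/*
 * A sketch of the dual-lock convention used for the fields annotated
 * "protected by @proc->inner_lock if @proc and by @lock" above: as the
 * annotations read, writers take both locks (binder_node_inner_lock()),
 * so a reader holding either lock alone observes a stable value.
 * Illustrative only:
 *
 *      binder_node_inner_lock(node);   // node->lock + proc->inner_lock
 *      node->pending_strong_ref = 1;   // write under both locks
 *      binder_node_inner_unlock(node);
 */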

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_FLUSH        = 0x01,
        BINDER_DEFERRED_RELEASE      = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
        struct list_head fixup_entry;
        struct file *file;
        size_t offset;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int    code;
        unsigned int    flags;
        long    priority;
        long    saved_priority;
        kuid_t  sender_euid;
        struct list_head fd_fixups;
        /**
         * @lock:  protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
        __acquires(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
        __releases(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
        __acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                /* annotation for sparse */
                __acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
        __releases(&node->lock) __releases(&node->proc->inner_lock)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        else
                /* annotation for sparse */
                __release(&node->proc->inner_lock);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                           struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}
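
/*
 * Illustrative contrast between the two enqueue flavours above (surrounding
 * context assumed): deferred work leaves process_todo untouched, so a reader
 * may go to sleep past it, while normal thread work makes the todo list
 * immediately runnable.
 *
 *      binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *      // process_todo unchanged: binder_has_work_ilocked() may still be false
 *
 *      binder_enqueue_thread_work_ilocked(thread, &t->work);
 *      // process_todo == true: the looper will pick the work up
 */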

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                      current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}
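
/*
 * Note the allocate-then-lock pattern above: kzalloc(GFP_KERNEL) may sleep,
 * so the new node is allocated before proc->inner_lock (a spinlock) is
 * taken. If another thread races in and inserts a node for the same ptr
 * first, binder_init_node_ilocked() returns that existing node with a tmp
 * ref taken, and the losing allocation is simply freed.
 */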

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        struct binder_thread *thread = container_of(target_list,
                                                    struct binder_thread, todo);
                        binder_dequeue_work_ilocked(&node->work);
                        BUG_ON(&thread->todo != target_list);
                        binder_enqueue_deferred_thread_work_ilocked(thread,
                                                                   &node->work);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                                !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        else
                __acquire(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        else
                __release(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}
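
/*
 * Typical lifetime of a temporary node reference (sketch; lookup context
 * assumed): binder_get_node() returns the node with a tmp ref already held,
 * and the caller drops it with binder_put_node() when done.
 *
 *      struct binder_node *node = binder_get_node(proc, ptr);
 *
 *      if (node) {
 *              ...node cannot be freed while the tmp ref is held...
 *              binder_put_node(node);
 *      }
 */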
1399
1400static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1401                                                 u32 desc, bool need_strong_ref)
1402{
1403        struct rb_node *n = proc->refs_by_desc.rb_node;
1404        struct binder_ref *ref;
1405
1406        while (n) {
1407                ref = rb_entry(n, struct binder_ref, rb_node_desc);
1408
1409                if (desc < ref->data.desc) {
1410                        n = n->rb_left;
1411                } else if (desc > ref->data.desc) {
1412                        n = n->rb_right;
1413                } else if (need_strong_ref && !ref->data.strong) {
1414                        binder_user_error("tried to use weak ref as strong ref\n");
1415                        return NULL;
1416                } else {
1417                        return ref;
1418                }
1419        }
1420        return NULL;
1421}
1422
1423/**
1424 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1425 * @proc:       binder_proc that owns the ref
1426 * @node:       binder_node of target
1427 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1428 *
1429 * Look up the ref for the given node and return it if it exists
1430 *
1431 * If it doesn't exist and the caller provides a newly allocated
1432 * ref, initialize the fields of the newly allocated ref and insert
1433 * into the given proc rb_trees and node refs list.
1434 *
1435 * Return:      the ref for node. It is possible that another thread
1436 *              allocated/initialized the ref first in which case the
1437 *              returned ref would be different than the passed-in
1438 *              new_ref. new_ref must be kfree'd by the caller in
1439 *              this case.
1440 */
1441static struct binder_ref *binder_get_ref_for_node_olocked(
1442                                        struct binder_proc *proc,
1443                                        struct binder_node *node,
1444                                        struct binder_ref *new_ref)
1445{
1446        struct binder_context *context = proc->context;
1447        struct rb_node **p = &proc->refs_by_node.rb_node;
1448        struct rb_node *parent = NULL;
1449        struct binder_ref *ref;
1450        struct rb_node *n;
1451
1452        while (*p) {
1453                parent = *p;
1454                ref = rb_entry(parent, struct binder_ref, rb_node_node);
1455
1456                if (node < ref->node)
1457                        p = &(*p)->rb_left;
1458                else if (node > ref->node)
1459                        p = &(*p)->rb_right;
1460                else
1461                        return ref;
1462        }
1463        if (!new_ref)
1464                return NULL;
1465
1466        binder_stats_created(BINDER_STAT_REF);
1467        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1468        new_ref->proc = proc;
1469        new_ref->node = node;
1470        rb_link_node(&new_ref->rb_node_node, parent, p);
1471        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1472
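        /*
         * Pick the lowest available descriptor: 0 is reserved for
         * refs to the context manager node, so other refs start at 1
         * and the scan below walks refs_by_desc in ascending order
         * until it finds a gap.
         */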
1473        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1474        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1475                ref = rb_entry(n, struct binder_ref, rb_node_desc);
1476                if (ref->data.desc > new_ref->data.desc)
1477                        break;
1478                new_ref->data.desc = ref->data.desc + 1;
1479        }
1480
1481        p = &proc->refs_by_desc.rb_node;
1482        while (*p) {
1483                parent = *p;
1484                ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1485
1486                if (new_ref->data.desc < ref->data.desc)
1487                        p = &(*p)->rb_left;
1488                else if (new_ref->data.desc > ref->data.desc)
1489                        p = &(*p)->rb_right;
1490                else
1491                        BUG();
1492        }
1493        rb_link_node(&new_ref->rb_node_desc, parent, p);
1494        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1495
1496        binder_node_lock(node);
1497        hlist_add_head(&new_ref->node_entry, &node->refs);
1498
1499        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1500                     "%d new ref %d desc %d for node %d\n",
1501                      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1502                      node->debug_id);
1503        binder_node_unlock(node);
1504        return new_ref;
1505}
1506
1507static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1508{
1509        bool delete_node = false;
1510
1511        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1512                     "%d delete ref %d desc %d for node %d\n",
1513                      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1514                      ref->node->debug_id);
1515
1516        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1517        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1518
1519        binder_node_inner_lock(ref->node);
1520        if (ref->data.strong)
1521                binder_dec_node_nilocked(ref->node, 1, 1);
1522
1523        hlist_del(&ref->node_entry);
1524        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1525        binder_node_inner_unlock(ref->node);
1526        /*
1527         * Clear ref->node unless we want the caller to free the node
1528         */
1529        if (!delete_node) {
1530                /*
1531                 * The caller uses ref->node to determine
1532                 * whether the node needs to be freed. Clear
1533                 * it since the node is still alive.
1534                 */
1535                ref->node = NULL;
1536        }
1537
1538        if (ref->death) {
1539                binder_debug(BINDER_DEBUG_DEAD_BINDER,
1540                             "%d delete ref %d desc %d has death notification\n",
1541                              ref->proc->pid, ref->data.debug_id,
1542                              ref->data.desc);
1543                binder_dequeue_work(ref->proc, &ref->death->work);
1544                binder_stats_deleted(BINDER_STAT_DEATH);
1545        }
1546        binder_stats_deleted(BINDER_STAT_REF);
1547}
1548
1549/**
1550 * binder_inc_ref_olocked() - increment the ref for given handle
1551 * @ref:         ref to be incremented
1552 * @strong:      if true, strong increment, else weak
1553 * @target_list: list to queue node work on
1554 *
1555 * Increment the ref. @ref->proc->outer_lock must be held on entry
1556 *
1557 * Return: 0, if successful, else errno
1558 */
1559static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1560                                  struct list_head *target_list)
1561{
1562        int ret;
1563
1564        if (strong) {
1565                if (ref->data.strong == 0) {
1566                        ret = binder_inc_node(ref->node, 1, 1, target_list);
1567                        if (ret)
1568                                return ret;
1569                }
1570                ref->data.strong++;
1571        } else {
1572                if (ref->data.weak == 0) {
1573                        ret = binder_inc_node(ref->node, 0, 1, target_list);
1574                        if (ret)
1575                                return ret;
1576                }
1577                ref->data.weak++;
1578        }
1579        return 0;
1580}
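
/*
 * Note that only the 0 -> 1 transition of a ref's strong or weak
 * count is forwarded to the node via binder_inc_node(); later
 * increments on the same ref are purely local bookkeeping.
 */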
1581
1582/**
1583 * binder_dec_ref_olocked() - dec the ref for given handle
1584 * @ref:        ref to be decremented
1585 * @strong:     if true, strong decrement, else weak
1586 *
1587 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1588 *
1589 * Return: true if ref is cleaned up and ready to be freed
1590 */
1591static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1592{
1593        if (strong) {
1594                if (ref->data.strong == 0) {
1595                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1596                                          ref->proc->pid, ref->data.debug_id,
1597                                          ref->data.desc, ref->data.strong,
1598                                          ref->data.weak);
1599                        return false;
1600                }
1601                ref->data.strong--;
1602                if (ref->data.strong == 0)
1603                        binder_dec_node(ref->node, strong, 1);
1604        } else {
1605                if (ref->data.weak == 0) {
1606                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1607                                          ref->proc->pid, ref->data.debug_id,
1608                                          ref->data.desc, ref->data.strong,
1609                                          ref->data.weak);
1610                        return false;
1611                }
1612                ref->data.weak--;
1613        }
1614        if (ref->data.strong == 0 && ref->data.weak == 0) {
1615                binder_cleanup_ref_olocked(ref);
1616                return true;
1617        }
1618        return false;
1619}
1620
1621/**
1622 * binder_get_node_from_ref() - get the node from the given proc/desc
1623 * @proc:       proc containing the ref
1624 * @desc:       the handle associated with the ref
1625 * @need_strong_ref: if true, only return node if ref is strong
1626 * @rdata:      the id/refcount data for the ref
1627 *
1628 * Given a proc and ref handle, return the associated binder_node
1629 *
1630 * Return: a binder_node, or NULL if the ref was not found or is
1630 *         only weak when a strong ref was required
1631 */
1632static struct binder_node *binder_get_node_from_ref(
1633                struct binder_proc *proc,
1634                u32 desc, bool need_strong_ref,
1635                struct binder_ref_data *rdata)
1636{
1637        struct binder_node *node;
1638        struct binder_ref *ref;
1639
1640        binder_proc_lock(proc);
1641        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1642        if (!ref)
1643                goto err_no_ref;
1644        node = ref->node;
1645        /*
1646         * Take an implicit reference on the node to ensure
1647         * it stays alive until the call to binder_put_node()
1648         */
1649        binder_inc_node_tmpref(node);
1650        if (rdata)
1651                *rdata = ref->data;
1652        binder_proc_unlock(proc);
1653
1654        return node;
1655
1656err_no_ref:
1657        binder_proc_unlock(proc);
1658        return NULL;
1659}
1660
1661/**
1662 * binder_free_ref() - free the binder_ref
1663 * @ref:        ref to free
1664 *
1665 * Free the binder_ref. Free the binder_node indicated by ref->node
1666 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1667 */
1668static void binder_free_ref(struct binder_ref *ref)
1669{
1670        if (ref->node)
1671                binder_free_node(ref->node);
1672        kfree(ref->death);
1673        kfree(ref);
1674}
1675
1676/**
1677 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1678 * @proc:       proc containing the ref
1679 * @desc:       the handle associated with the ref
1680 * @increment:  true=inc reference, false=dec reference
1681 * @strong:     true=strong reference, false=weak reference
1682 * @rdata:      the id/refcount data for the ref
1683 *
1684 * Given a proc and ref handle, increment or decrement the ref
1685 * according to @increment.
1686 *
1687 * Return: 0 if successful, else errno
1688 */
1689static int binder_update_ref_for_handle(struct binder_proc *proc,
1690                uint32_t desc, bool increment, bool strong,
1691                struct binder_ref_data *rdata)
1692{
1693        int ret = 0;
1694        struct binder_ref *ref;
1695        bool delete_ref = false;
1696
1697        binder_proc_lock(proc);
1698        ref = binder_get_ref_olocked(proc, desc, strong);
1699        if (!ref) {
1700                ret = -EINVAL;
1701                goto err_no_ref;
1702        }
1703        if (increment)
1704                ret = binder_inc_ref_olocked(ref, strong, NULL);
1705        else
1706                delete_ref = binder_dec_ref_olocked(ref, strong);
1707
1708        if (rdata)
1709                *rdata = ref->data;
1710        binder_proc_unlock(proc);
1711
1712        if (delete_ref)
1713                binder_free_ref(ref);
1714        return ret;
1715
1716err_no_ref:
1717        binder_proc_unlock(proc);
1718        return ret;
1719}
1720
1721/**
1722 * binder_dec_ref_for_handle() - dec the ref for given handle
1723 * @proc:       proc containing the ref
1724 * @desc:       the handle associated with the ref
1725 * @strong:     true=strong reference, false=weak reference
1726 * @rdata:      the id/refcount data for the ref
1727 *
1728 * Just calls binder_update_ref_for_handle() to decrement the ref.
1729 *
1730 * Return: 0 if successful, else errno
1731 */
1732static int binder_dec_ref_for_handle(struct binder_proc *proc,
1733                uint32_t desc, bool strong, struct binder_ref_data *rdata)
1734{
1735        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1736}
1737
1738
1739/**
1740 * binder_inc_ref_for_node() - increment the ref for given proc/node
1741 * @proc:        proc containing the ref
1742 * @node:        target node
1743 * @strong:      true=strong reference, false=weak reference
1744 * @target_list: worklist to use if node is incremented
1745 * @rdata:       the id/refcount data for the ref
1746 *
1747 * Given a proc and node, increment the ref. Create the ref if it
1748 * doesn't already exist
1749 *
1750 * Return: 0 if successful, else errno
1751 */
1752static int binder_inc_ref_for_node(struct binder_proc *proc,
1753                        struct binder_node *node,
1754                        bool strong,
1755                        struct list_head *target_list,
1756                        struct binder_ref_data *rdata)
1757{
1758        struct binder_ref *ref;
1759        struct binder_ref *new_ref = NULL;
1760        int ret = 0;
1761
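        /*
         * The ref is looked up first without allocating; if it is
         * missing, the proc lock must be dropped before the sleeping
         * kzalloc() and retaken for a second lookup, since another
         * thread may have created the ref in the meantime (in which
         * case the spare allocation is freed at the end).
         */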
1762        binder_proc_lock(proc);
1763        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1764        if (!ref) {
1765                binder_proc_unlock(proc);
1766                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1767                if (!new_ref)
1768                        return -ENOMEM;
1769                binder_proc_lock(proc);
1770                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1771        }
1772        ret = binder_inc_ref_olocked(ref, strong, target_list);
1773        *rdata = ref->data;
1774        binder_proc_unlock(proc);
1775        if (new_ref && ref != new_ref)
1776                /*
1777                 * Another thread created the ref first so
1778                 * free the one we allocated
1779                 */
1780                kfree(new_ref);
1781        return ret;
1782}
1783
1784static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1785                                           struct binder_transaction *t)
1786{
1787        BUG_ON(!target_thread);
1788        assert_spin_locked(&target_thread->proc->inner_lock);
1789        BUG_ON(target_thread->transaction_stack != t);
1790        BUG_ON(target_thread->transaction_stack->from != target_thread);
1791        target_thread->transaction_stack =
1792                target_thread->transaction_stack->from_parent;
1793        t->from = NULL;
1794}
1795
1796/**
1797 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1798 * @thread:     thread to decrement
1799 *
1800 * A thread needs to be kept alive while being used to create or
1801 * handle a transaction. binder_get_txn_from() is used to safely
1802 * extract t->from from a binder_transaction and keep the thread
1803 * indicated by t->from from being freed. When done with that
1804 * binder_thread, this function is called to decrement the
1805 * tmp_ref and free if appropriate (thread has been released
1806 * and no transaction being processed by the driver)
1807 */
1808static void binder_thread_dec_tmpref(struct binder_thread *thread)
1809{
1810        /*
1811         * The atomic alone protects the counter while it cannot reach
1812         * zero or thread->is_dead is false; the inner lock covers the rest.
1813         */
1814        binder_inner_proc_lock(thread->proc);
1815        atomic_dec(&thread->tmp_ref);
1816        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1817                binder_inner_proc_unlock(thread->proc);
1818                binder_free_thread(thread);
1819                return;
1820        }
1821        binder_inner_proc_unlock(thread->proc);
1822}
1823
1824/**
1825 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1826 * @proc:       proc to decrement
1827 *
1828 * A binder_proc needs to be kept alive while being used to create or
1829 * handle a transaction. proc->tmp_ref is incremented when
1830 * creating a new transaction or the binder_proc is currently in-use
1831 * by threads that are being released. When done with the binder_proc,
1832 * this function is called to decrement the counter and free the
1833 * proc if appropriate (proc has been released, all threads have
1834 * been released and not currently in-use to process a transaction).
1835 */
1836static void binder_proc_dec_tmpref(struct binder_proc *proc)
1837{
1838        binder_inner_proc_lock(proc);
1839        proc->tmp_ref--;
1840        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1841                        !proc->tmp_ref) {
1842                binder_inner_proc_unlock(proc);
1843                binder_free_proc(proc);
1844                return;
1845        }
1846        binder_inner_proc_unlock(proc);
1847}
1848
1849/**
1850 * binder_get_txn_from() - safely extract the "from" thread in transaction
1851 * @t:  binder transaction for t->from
1852 *
1853 * Atomically return the "from" thread and increment the tmp_ref
1854 * count for the thread to ensure it stays alive until
1855 * binder_thread_dec_tmpref() is called.
1856 *
1857 * Return: the value of t->from
1858 */
1859static struct binder_thread *binder_get_txn_from(
1860                struct binder_transaction *t)
1861{
1862        struct binder_thread *from;
1863
1864        spin_lock(&t->lock);
1865        from = t->from;
1866        if (from)
1867                atomic_inc(&from->tmp_ref);
1868        spin_unlock(&t->lock);
1869        return from;
1870}
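
/*
 * Illustrative pairing (not driver code): every non-NULL return from
 * binder_get_txn_from() must be balanced with a
 * binder_thread_dec_tmpref() once the thread is no longer needed:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...from cannot be freed while it is used here...
 *		binder_thread_dec_tmpref(from);
 *	}
 */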
1871
1872/**
1873 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1874 * @t:  binder transaction for t->from
1875 *
1876 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1877 * to guarantee that the thread cannot be released while operating on it.
1878 * The caller must call binder_inner_proc_unlock() to release the inner lock
1879 * as well as call binder_dec_thread_txn() to release the reference.
1880 *
1881 * Return: the value of t->from
1882 */
1883static struct binder_thread *binder_get_txn_from_and_acq_inner(
1884                struct binder_transaction *t)
1885        __acquires(&t->from->proc->inner_lock)
1886{
1887        struct binder_thread *from;
1888
1889        from = binder_get_txn_from(t);
1890        if (!from) {
1891                __acquire(&from->proc->inner_lock);
1892                return NULL;
1893        }
1894        binder_inner_proc_lock(from->proc);
1895        if (t->from) {
1896                BUG_ON(from != t->from);
1897                return from;
1898        }
1899        binder_inner_proc_unlock(from->proc);
1900        __acquire(&from->proc->inner_lock);
1901        binder_thread_dec_tmpref(from);
1902        return NULL;
1903}
1904
1905/**
1906 * binder_free_txn_fixups() - free unprocessed fd fixups
1907 * @t:  binder transaction whose fd fixups should be freed
1908 *
1909 * If the transaction is being torn down prior to being
1910 * processed by the target process, free all of the
1911 * fd fixups and fput the file structs. It is safe to
1912 * call this function after the fixups have been
1913 * processed -- in that case, the list will be empty.
1914 */
1915static void binder_free_txn_fixups(struct binder_transaction *t)
1916{
1917        struct binder_txn_fd_fixup *fixup, *tmp;
1918
1919        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1920                fput(fixup->file);
1921                list_del(&fixup->fixup_entry);
1922                kfree(fixup);
1923        }
1924}
1925
1926static void binder_free_transaction(struct binder_transaction *t)
1927{
1928        if (t->buffer)
1929                t->buffer->transaction = NULL;
1930        binder_free_txn_fixups(t);
1931        kfree(t);
1932        binder_stats_deleted(BINDER_STAT_TRANSACTION);
1933}
1934
1935static void binder_send_failed_reply(struct binder_transaction *t,
1936                                     uint32_t error_code)
1937{
1938        struct binder_thread *target_thread;
1939        struct binder_transaction *next;
1940
1941        BUG_ON(t->flags & TF_ONE_WAY);
1942        while (1) {
1943                target_thread = binder_get_txn_from_and_acq_inner(t);
1944                if (target_thread) {
1945                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1946                                     "send failed reply for transaction %d to %d:%d\n",
1947                                      t->debug_id,
1948                                      target_thread->proc->pid,
1949                                      target_thread->pid);
1950
1951                        binder_pop_transaction_ilocked(target_thread, t);
1952                        if (target_thread->reply_error.cmd == BR_OK) {
1953                                target_thread->reply_error.cmd = error_code;
1954                                binder_enqueue_thread_work_ilocked(
1955                                        target_thread,
1956                                        &target_thread->reply_error.work);
1957                                wake_up_interruptible(&target_thread->wait);
1958                        } else {
1959                                /*
1960                                 * Cannot get here for normal operation, but
1961                                 * we can if multiple synchronous transactions
1962                                 * are sent without blocking for responses.
1963                                 * Just ignore the 2nd error in this case.
1964                                 */
1965                                pr_warn("Unexpected reply error: %u\n",
1966                                        target_thread->reply_error.cmd);
1967                        }
1968                        binder_inner_proc_unlock(target_thread->proc);
1969                        binder_thread_dec_tmpref(target_thread);
1970                        binder_free_transaction(t);
1971                        return;
1972                } else {
1973                        __release(&target_thread->proc->inner_lock);
1974                }
1975                next = t->from_parent;
1976
1977                binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1978                             "send failed reply for transaction %d, target dead\n",
1979                             t->debug_id);
1980
1981                binder_free_transaction(t);
1982                if (next == NULL) {
1983                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
1984                                     "reply failed, no target thread at root\n");
1985                        return;
1986                }
1987                t = next;
1988                binder_debug(BINDER_DEBUG_DEAD_BINDER,
1989                             "reply failed, no target thread -- retry %d\n",
1990                              t->debug_id);
1991        }
1992}
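
/*
 * Note on the loop above: if the immediate caller of the failed
 * transaction is already dead, the error is propagated up the
 * t->from_parent chain to the closest live ancestor, and every dead
 * link in the chain is freed along the way.
 */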
1993
1994/**
1995 * binder_cleanup_transaction() - cleans up undelivered transaction
1996 * @t:          transaction that needs to be cleaned up
1997 * @reason:     reason the transaction wasn't delivered
1998 * @error_code: error to return to caller (if synchronous call)
1999 */
2000static void binder_cleanup_transaction(struct binder_transaction *t,
2001                                       const char *reason,
2002                                       uint32_t error_code)
2003{
2004        if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2005                binder_send_failed_reply(t, error_code);
2006        } else {
2007                binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2008                        "undelivered transaction %d, %s\n",
2009                        t->debug_id, reason);
2010                binder_free_transaction(t);
2011        }
2012}
2013
2014/**
2015 * binder_validate_object() - checks for a valid metadata object in a buffer.
2016 * @buffer:     binder_buffer that we're parsing.
2017 * @offset:     offset in the buffer at which to validate an object.
2018 *
2019 * Return:      If there's a valid metadata object at @offset in @buffer, the
2020 *              size of that object. Otherwise, it returns zero.
2021 */
2022static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2023{
2024        /* Check if we can read a header first */
2025        struct binder_object_header *hdr;
2026        size_t object_size = 0;
2027
2028        if (buffer->data_size < sizeof(*hdr) ||
2029            offset > buffer->data_size - sizeof(*hdr) ||
2030            !IS_ALIGNED(offset, sizeof(u32)))
2031                return 0;
2032
2033        /* Ok, now see if we can read a complete object. */
2034        hdr = (struct binder_object_header *)(buffer->data + offset);
2035        switch (hdr->type) {
2036        case BINDER_TYPE_BINDER:
2037        case BINDER_TYPE_WEAK_BINDER:
2038        case BINDER_TYPE_HANDLE:
2039        case BINDER_TYPE_WEAK_HANDLE:
2040                object_size = sizeof(struct flat_binder_object);
2041                break;
2042        case BINDER_TYPE_FD:
2043                object_size = sizeof(struct binder_fd_object);
2044                break;
2045        case BINDER_TYPE_PTR:
2046                object_size = sizeof(struct binder_buffer_object);
2047                break;
2048        case BINDER_TYPE_FDA:
2049                object_size = sizeof(struct binder_fd_array_object);
2050                break;
2051        default:
2052                return 0;
2053        }
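        /*
         * Both conditions below are needed: for a buffer smaller than
         * the object, the unsigned subtraction would wrap around, so
         * the data_size >= object_size test guards that case.
         */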
2054        if (offset <= buffer->data_size - object_size &&
2055            buffer->data_size >= object_size)
2056                return object_size;
2057        else
2058                return 0;
2059}
2060
2061/**
2062 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2063 * @b:          binder_buffer containing the object
2064 * @index:      index in offset array at which the binder_buffer_object is
2065 *              located
2066 * @start:      points to the start of the offset array
2067 * @num_valid:  the number of valid offsets in the offset array
2068 *
2069 * Return:      If @index is within the valid range of the offset array
2070 *              described by @start and @num_valid, and if there's a valid
2071 *              binder_buffer_object at the offset found in index @index
2072 *              of the offset array, that object is returned. Otherwise,
2073 *              %NULL is returned.
2074 *              Note that the offset found in index @index itself is not
2075 *              verified; this function assumes that @num_valid elements
2076 *              from @start were previously verified to have valid offsets.
2077 */
2078static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2079                                                        binder_size_t index,
2080                                                        binder_size_t *start,
2081                                                        binder_size_t num_valid)
2082{
2083        struct binder_buffer_object *buffer_obj;
2084        binder_size_t *offp;
2085
2086        if (index >= num_valid)
2087                return NULL;
2088
2089        offp = start + index;
2090        buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2091        if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2092                return NULL;
2093
2094        return buffer_obj;
2095}
2096
2097/**
2098 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2099 * @b:                  transaction buffer
2100 * @objects_start:      start of objects buffer
2101 * @buffer:             binder_buffer_object in which to fix up
2102 * @fixup_offset:       start offset in @buffer to fix up
2103 * @last_obj:           last binder_buffer_object that we fixed up in
2104 * @last_min_offset:    minimum fixup offset in @last_obj
2105 *
2106 * Return:              %true if a fixup in buffer @buffer at offset @offset is
2107 *                      allowed.
2108 *
2109 * For safety reasons, we only allow fixups inside a buffer to happen
2110 * at increasing offsets; additionally, we only allow fixup on the last
2111 * buffer object that was verified, or one of its parents.
2112 *
2113 * Example of what is allowed:
2114 *
2115 * A
2116 *   B (parent = A, offset = 0)
2117 *   C (parent = A, offset = 16)
2118 *     D (parent = C, offset = 0)
2119 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2120 *
2121 * Examples of what is not allowed:
2122 *
2123 * Decreasing offsets within the same parent:
2124 * A
2125 *   C (parent = A, offset = 16)
2126 *   B (parent = A, offset = 0) // decreasing offset within A
2127 *
2128 * Referring to a parent that wasn't the last object or any of its parents:
2129 * A
2130 *   B (parent = A, offset = 0)
2131 *   C (parent = A, offset = 0)
2132 *   C (parent = A, offset = 16)
2133 *     D (parent = B, offset = 0) // B is not A or any of A's parents
2134 */
2135static bool binder_validate_fixup(struct binder_buffer *b,
2136                                  binder_size_t *objects_start,
2137                                  struct binder_buffer_object *buffer,
2138                                  binder_size_t fixup_offset,
2139                                  struct binder_buffer_object *last_obj,
2140                                  binder_size_t last_min_offset)
2141{
2142        if (!last_obj) {
2143                /* No object verified yet, so nothing to fix up in */
2144                return false;
2145        }
2146
2147        while (last_obj != buffer) {
2148                /*
2149                 * Safe to retrieve the parent of last_obj, since it
2150                 * was already previously verified by the driver.
2151                 */
2152                if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2153                        return false;
2154                last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2155                last_obj = (struct binder_buffer_object *)
2156                        (b->data + *(objects_start + last_obj->parent));
2157        }
2158        return (fixup_offset >= last_min_offset);
2159}
2160
2161/**
2162 * struct binder_task_work_cb - for deferred close
2163 *
2164 * @twork:                callback_head for task work
2165 * @fd:                   fd to close
2166 *
2167 * Structure to pass task work to be handled after
2168 * returning from binder_ioctl() via task_work_add().
2169 */
2170struct binder_task_work_cb {
2171        struct callback_head twork;
2172        struct file *file;
2173};
2174
2175/**
2176 * binder_do_fd_close() - close list of file descriptors
2177 * @twork:      callback head for task work
2178 *
2179 * It is not safe to call ksys_close() during the binder_ioctl()
2180 * function if there is a chance that binder's own file descriptor
2181 * might be closed. This is to meet the requirements for using
2182 * fdget() (see comments for __fget_light()). Therefore use
2183 * task_work_add() to schedule the close operation once we have
2184 * returned from binder_ioctl(). This function is a callback
2185 * for that mechanism and does the deferred fput() on the file
2186 * that was detached from the fd table in binder_deferred_fd_close().
2187 */
2188static void binder_do_fd_close(struct callback_head *twork)
2189{
2190        struct binder_task_work_cb *twcb = container_of(twork,
2191                        struct binder_task_work_cb, twork);
2192
2193        fput(twcb->file);
2194        kfree(twcb);
2195}
2196
2197/**
2198 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2199 * @fd:         file-descriptor to close
2200 *
2201 * See comments in binder_do_fd_close(). This function is used to schedule
2202 * a file-descriptor to be closed after returning from binder_ioctl().
2203 */
2204static void binder_deferred_fd_close(int fd)
2205{
2206        struct binder_task_work_cb *twcb;
2207
2208        twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2209        if (!twcb)
2210                return;
2211        init_task_work(&twcb->twork, binder_do_fd_close);
2212        __close_fd_get_file(fd, &twcb->file);
2213        if (twcb->file)
2214                task_work_add(current, &twcb->twork, true);
2215        else
2216                kfree(twcb);
2217}
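
/*
 * Usage note: because the work is queued with task_work_add(), the
 * fput() in binder_do_fd_close() runs in task context once the
 * current syscall returns to user space, e.g. for each fd of an
 * undelivered BINDER_TYPE_FDA object, as done in
 * binder_transaction_buffer_release() below:
 *
 *	for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
 *		binder_deferred_fd_close(fd_array[fd_index]);
 */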
2218
2219static void binder_transaction_buffer_release(struct binder_proc *proc,
2220                                              struct binder_buffer *buffer,
2221                                              binder_size_t *failed_at)
2222{
2223        binder_size_t *offp, *off_start, *off_end;
2224        int debug_id = buffer->debug_id;
2225
2226        binder_debug(BINDER_DEBUG_TRANSACTION,
2227                     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2228                     proc->pid, buffer->debug_id,
2229                     buffer->data_size, buffer->offsets_size, failed_at);
2230
2231        if (buffer->target_node)
2232                binder_dec_node(buffer->target_node, 1, 0);
2233
2234        off_start = (binder_size_t *)(buffer->data +
2235                                      ALIGN(buffer->data_size, sizeof(void *)));
2236        if (failed_at)
2237                off_end = failed_at;
2238        else
2239                off_end = (void *)off_start + buffer->offsets_size;
2240        for (offp = off_start; offp < off_end; offp++) {
2241                struct binder_object_header *hdr;
2242                size_t object_size = binder_validate_object(buffer, *offp);
2243
2244                if (object_size == 0) {
2245                        pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2246                               debug_id, (u64)*offp, buffer->data_size);
2247                        continue;
2248                }
2249                hdr = (struct binder_object_header *)(buffer->data + *offp);
2250                switch (hdr->type) {
2251                case BINDER_TYPE_BINDER:
2252                case BINDER_TYPE_WEAK_BINDER: {
2253                        struct flat_binder_object *fp;
2254                        struct binder_node *node;
2255
2256                        fp = to_flat_binder_object(hdr);
2257                        node = binder_get_node(proc, fp->binder);
2258                        if (node == NULL) {
2259                                pr_err("transaction release %d bad node %016llx\n",
2260                                       debug_id, (u64)fp->binder);
2261                                break;
2262                        }
2263                        binder_debug(BINDER_DEBUG_TRANSACTION,
2264                                     "        node %d u%016llx\n",
2265                                     node->debug_id, (u64)node->ptr);
2266                        binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2267                                        0);
2268                        binder_put_node(node);
2269                } break;
2270                case BINDER_TYPE_HANDLE:
2271                case BINDER_TYPE_WEAK_HANDLE: {
2272                        struct flat_binder_object *fp;
2273                        struct binder_ref_data rdata;
2274                        int ret;
2275
2276                        fp = to_flat_binder_object(hdr);
2277                        ret = binder_dec_ref_for_handle(proc, fp->handle,
2278                                hdr->type == BINDER_TYPE_HANDLE, &rdata);
2279
2280                        if (ret) {
2281                                pr_err("transaction release %d bad handle %d, ret = %d\n",
2282                                 debug_id, fp->handle, ret);
2283                                break;
2284                        }
2285                        binder_debug(BINDER_DEBUG_TRANSACTION,
2286                                     "        ref %d desc %d\n",
2287                                     rdata.debug_id, rdata.desc);
2288                } break;
2289
2290                case BINDER_TYPE_FD: {
2291                        /*
2292                         * No need to close the file here since user-space
2293                         * closes it for successfully delivered
2294                         * transactions. For transactions that weren't
2295                         * delivered, the new fd was never allocated so
2296                         * there is no need to close and the fput on the
2297                         * file is done when the transaction is torn
2298                         * down.
2299                         */
2300                        WARN_ON(failed_at &&
2301                                proc->tsk == current->group_leader);
2302                } break;
2303                case BINDER_TYPE_PTR:
2304                        /*
2305                         * Nothing to do here, this will get cleaned up when the
2306                         * transaction buffer gets freed
2307                         */
2308                        break;
2309                case BINDER_TYPE_FDA: {
2310                        struct binder_fd_array_object *fda;
2311                        struct binder_buffer_object *parent;
2312                        uintptr_t parent_buffer;
2313                        u32 *fd_array;
2314                        size_t fd_index;
2315                        binder_size_t fd_buf_size;
2316
2317                        if (proc->tsk != current->group_leader) {
2318                                /*
2319                                 * Nothing to do if running in sender context
2320                                 * The fd fixups have not been applied so no
2321                                 * fds need to be closed.
2322                                 */
2323                                continue;
2324                        }
2325
2326                        fda = to_binder_fd_array_object(hdr);
2327                        parent = binder_validate_ptr(buffer, fda->parent,
2328                                                     off_start,
2329                                                     offp - off_start);
2330                        if (!parent) {
2331                                pr_err("transaction release %d bad parent offset\n",
2332                                       debug_id);
2333                                continue;
2334                        }
2335                        /*
2336                         * Since the parent was already fixed up, convert it
2337                         * back to kernel address space to access it
2338                         */
2339                        parent_buffer = parent->buffer -
2340                                binder_alloc_get_user_buffer_offset(
2341                                                &proc->alloc);
2342
2343                        fd_buf_size = sizeof(u32) * fda->num_fds;
2344                        if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2345                                pr_err("transaction release %d invalid number of fds (%lld)\n",
2346                                       debug_id, (u64)fda->num_fds);
2347                                continue;
2348                        }
2349                        if (fd_buf_size > parent->length ||
2350                            fda->parent_offset > parent->length - fd_buf_size) {
2351                                /* No space for all file descriptors here. */
2352                                pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2353                                       debug_id, (u64)fda->num_fds);
2354                                continue;
2355                        }
2356                        fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2357                        for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2358                                binder_deferred_fd_close(fd_array[fd_index]);
2359                } break;
2360                default:
2361                        pr_err("transaction release %d bad object type %x\n",
2362                                debug_id, hdr->type);
2363                        break;
2364                }
2365        }
2366}
2367
2368static int binder_translate_binder(struct flat_binder_object *fp,
2369                                   struct binder_transaction *t,
2370                                   struct binder_thread *thread)
2371{
2372        struct binder_node *node;
2373        struct binder_proc *proc = thread->proc;
2374        struct binder_proc *target_proc = t->to_proc;
2375        struct binder_ref_data rdata;
2376        int ret = 0;
2377
2378        node = binder_get_node(proc, fp->binder);
2379        if (!node) {
2380                node = binder_new_node(proc, fp);
2381                if (!node)
2382                        return -ENOMEM;
2383        }
2384        if (fp->cookie != node->cookie) {
2385                binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2386                                  proc->pid, thread->pid, (u64)fp->binder,
2387                                  node->debug_id, (u64)fp->cookie,
2388                                  (u64)node->cookie);
2389                ret = -EINVAL;
2390                goto done;
2391        }
2392        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2393                ret = -EPERM;
2394                goto done;
2395        }
2396
2397        ret = binder_inc_ref_for_node(target_proc, node,
2398                        fp->hdr.type == BINDER_TYPE_BINDER,
2399                        &thread->todo, &rdata);
2400        if (ret)
2401                goto done;
2402
2403        if (fp->hdr.type == BINDER_TYPE_BINDER)
2404                fp->hdr.type = BINDER_TYPE_HANDLE;
2405        else
2406                fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2407        fp->binder = 0;
2408        fp->handle = rdata.desc;
2409        fp->cookie = 0;
2410
2411        trace_binder_transaction_node_to_ref(t, node, &rdata);
2412        binder_debug(BINDER_DEBUG_TRANSACTION,
2413                     "        node %d u%016llx -> ref %d desc %d\n",
2414                     node->debug_id, (u64)node->ptr,
2415                     rdata.debug_id, rdata.desc);
2416done:
2417        binder_put_node(node);
2418        return ret;
2419}
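
/*
 * Net effect of the translation above, as seen in the delivered
 * flat_binder_object (an illustrative sketch):
 *
 *	sender:  { type = BINDER_TYPE_BINDER, binder = node ptr, cookie }
 *	target:  { type = BINDER_TYPE_HANDLE, handle = rdata.desc }
 *
 * A node never leaves its owning process as a raw pointer; other
 * processes always see it as a handle to a ref on the node.
 */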
2420
2421static int binder_translate_handle(struct flat_binder_object *fp,
2422                                   struct binder_transaction *t,
2423                                   struct binder_thread *thread)
2424{
2425        struct binder_proc *proc = thread->proc;
2426        struct binder_proc *target_proc = t->to_proc;
2427        struct binder_node *node;
2428        struct binder_ref_data src_rdata;
2429        int ret = 0;
2430
2431        node = binder_get_node_from_ref(proc, fp->handle,
2432                        fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2433        if (!node) {
2434                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2435                                  proc->pid, thread->pid, fp->handle);
2436                return -EINVAL;
2437        }
2438        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2439                ret = -EPERM;
2440                goto done;
2441        }
2442
2443        binder_node_lock(node);
2444        if (node->proc == target_proc) {
2445                if (fp->hdr.type == BINDER_TYPE_HANDLE)
2446                        fp->hdr.type = BINDER_TYPE_BINDER;
2447                else
2448                        fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2449                fp->binder = node->ptr;
2450                fp->cookie = node->cookie;
2451                if (node->proc)
2452                        binder_inner_proc_lock(node->proc);
2453                else
2454                        __acquire(&node->proc->inner_lock);
2455                binder_inc_node_nilocked(node,
2456                                         fp->hdr.type == BINDER_TYPE_BINDER,
2457                                         0, NULL);
2458                if (node->proc)
2459                        binder_inner_proc_unlock(node->proc);
2460                else
2461                        __release(&node->proc->inner_lock);
2462                trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2463                binder_debug(BINDER_DEBUG_TRANSACTION,
2464                             "        ref %d desc %d -> node %d u%016llx\n",
2465                             src_rdata.debug_id, src_rdata.desc, node->debug_id,
2466                             (u64)node->ptr);
2467                binder_node_unlock(node);
2468        } else {
2469                struct binder_ref_data dest_rdata;
2470
2471                binder_node_unlock(node);
2472                ret = binder_inc_ref_for_node(target_proc, node,
2473                                fp->hdr.type == BINDER_TYPE_HANDLE,
2474                                NULL, &dest_rdata);
2475                if (ret)
2476                        goto done;
2477
2478                fp->binder = 0;
2479                fp->handle = dest_rdata.desc;
2480                fp->cookie = 0;
2481                trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2482                                                    &dest_rdata);
2483                binder_debug(BINDER_DEBUG_TRANSACTION,
2484                             "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2485                             src_rdata.debug_id, src_rdata.desc,
2486                             dest_rdata.debug_id, dest_rdata.desc,
2487                             node->debug_id);
2488        }
2489done:
2490        binder_put_node(node);
2491        return ret;
2492}
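
/*
 * The two branches above give handles "round trip" semantics: a
 * handle sent back to the process that owns the node collapses into
 * a BINDER_TYPE_BINDER object with the original ptr/cookie restored,
 * while a handle sent to any third process is delivered as a handle
 * in that process's own descriptor space (creating a ref there if
 * none exists yet).
 */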
2493
2494static int binder_translate_fd(u32 *fdp,
2495                               struct binder_transaction *t,
2496                               struct binder_thread *thread,
2497                               struct binder_transaction *in_reply_to)
2498{
2499        struct binder_proc *proc = thread->proc;
2500        struct binder_proc *target_proc = t->to_proc;
2501        struct binder_txn_fd_fixup *fixup;
2502        struct file *file;
2503        int ret = 0;
2504        bool target_allows_fd;
2505        int fd = *fdp;
2506
2507        if (in_reply_to)
2508                target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2509        else
2510                target_allows_fd = t->buffer->target_node->accept_fds;
2511        if (!target_allows_fd) {
2512                binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2513                                  proc->pid, thread->pid,
2514                                  in_reply_to ? "reply" : "transaction",
2515                                  fd);
2516                ret = -EPERM;
2517                goto err_fd_not_accepted;
2518        }
2519
2520        file = fget(fd);
2521        if (!file) {
2522                binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2523                                  proc->pid, thread->pid, fd);
2524                ret = -EBADF;
2525                goto err_fget;
2526        }
2527        ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2528        if (ret < 0) {
2529                ret = -EPERM;
2530                goto err_security;
2531        }
2532
2533        /*
2534         * Add fixup record for this transaction. The allocation
2535         * of the fd in the target needs to be done from a
2536         * target thread.
2537         */
2538        fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2539        if (!fixup) {
2540                ret = -ENOMEM;
2541                goto err_alloc;
2542        }
2543        fixup->file = file;
2544        fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2545        trace_binder_transaction_fd_send(t, fd, fixup->offset);
2546        list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2547
2548        return ret;
2549
2550err_alloc:
2551err_security:
2552        fput(file);
2553err_fget:
2554err_fd_not_accepted:
2555        return ret;
2556}
2557
2558static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2559                                     struct binder_buffer_object *parent,
2560                                     struct binder_transaction *t,
2561                                     struct binder_thread *thread,
2562                                     struct binder_transaction *in_reply_to)
2563{
2564        binder_size_t fdi, fd_buf_size;
2565        uintptr_t parent_buffer;
2566        u32 *fd_array;
2567        struct binder_proc *proc = thread->proc;
2568        struct binder_proc *target_proc = t->to_proc;
2569
2570        fd_buf_size = sizeof(u32) * fda->num_fds;
2571        if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2572                binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2573                                  proc->pid, thread->pid, (u64)fda->num_fds);
2574                return -EINVAL;
2575        }
2576        if (fd_buf_size > parent->length ||
2577            fda->parent_offset > parent->length - fd_buf_size) {
2578                /* No space for all file descriptors here. */
2579                binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2580                                  proc->pid, thread->pid, (u64)fda->num_fds);
2581                return -EINVAL;
2582        }
2583        /*
2584         * Since the parent was already fixed up, convert it
2585         * back to the kernel address space to access it
2586         */
2587        parent_buffer = parent->buffer -
2588                binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2589        fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2590        if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2591                binder_user_error("%d:%d parent offset not aligned correctly.\n",
2592                                  proc->pid, thread->pid);
2593                return -EINVAL;
2594        }
2595        for (fdi = 0; fdi < fda->num_fds; fdi++) {
2596                int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2597                                                in_reply_to);
2598                if (ret < 0)
2599                        return ret;
2600        }
2601        return 0;
2602}
2603
2604static int binder_fixup_parent(struct binder_transaction *t,
2605                               struct binder_thread *thread,
2606                               struct binder_buffer_object *bp,
2607                               binder_size_t *off_start,
2608                               binder_size_t num_valid,
2609                               struct binder_buffer_object *last_fixup_obj,
2610                               binder_size_t last_fixup_min_off)
2611{
2612        struct binder_buffer_object *parent;
2613        u8 *parent_buffer;
2614        struct binder_buffer *b = t->buffer;
2615        struct binder_proc *proc = thread->proc;
2616        struct binder_proc *target_proc = t->to_proc;
2617
2618        if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2619                return 0;
2620
2621        parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2622        if (!parent) {
2623                binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2624                                  proc->pid, thread->pid);
2625                return -EINVAL;
2626        }
2627
2628        if (!binder_validate_fixup(b, off_start,
2629                                   parent, bp->parent_offset,
2630                                   last_fixup_obj,
2631                                   last_fixup_min_off)) {
2632                binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2633                                  proc->pid, thread->pid);
2634                return -EINVAL;
2635        }
2636
2637        if (parent->length < sizeof(binder_uintptr_t) ||
2638            bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2639                /* No space for a pointer here! */
2640                binder_user_error("%d:%d got transaction with invalid parent offset\n",
2641                                  proc->pid, thread->pid);
2642                return -EINVAL;
2643        }
2644        parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2645                        binder_alloc_get_user_buffer_offset(
2646                                &target_proc->alloc));
2647        *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2648
2649        return 0;
2650}
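
/*
 * Sketch of the fixup above: the parent object embeds a user-space
 * pointer at @parent_offset; binder_fixup_parent() rewrites it to
 * bp->buffer, the address at which the child buffer is mapped in the
 * target, so the receiver sees a consistent object graph in its own
 * address space.
 */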
2651
2652/**
2653 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2654 * @t:          transaction to send
2655 * @proc:       process to send the transaction to
2656 * @thread:     thread in @proc to send the transaction to (may be NULL)
2657 *
2658 * This function queues a transaction to the specified process. It will try
2659 * to find a thread in the target process to handle the transaction and
2660 * wake it up. If no thread is found, the work is queued to the proc
2661 * waitqueue.
2662 *
2663 * If the @thread parameter is not NULL, the transaction is always queued
2664 * to the waitlist of that specific thread.
2665 *
2666 * Return:      true if the transaction was successfully queued
2667 *              false if the target process or thread is dead
2668 */
2669static bool binder_proc_transaction(struct binder_transaction *t,
2670                                    struct binder_proc *proc,
2671                                    struct binder_thread *thread)
2672{
2673        struct binder_node *node = t->buffer->target_node;
2674        bool oneway = !!(t->flags & TF_ONE_WAY);
2675        bool pending_async = false;
2676
2677        BUG_ON(!node);
2678        binder_node_lock(node);
2679        if (oneway) {
2680                BUG_ON(thread);
2681                if (node->has_async_transaction) {
2682                        pending_async = true;
2683                } else {
2684                        node->has_async_transaction = true;
2685                }
2686        }
2687
2688        binder_inner_proc_lock(proc);
2689
2690        if (proc->is_dead || (thread && thread->is_dead)) {
2691                binder_inner_proc_unlock(proc);
2692                binder_node_unlock(node);
2693                return false;
2694        }
2695
2696        if (!thread && !pending_async)
2697                thread = binder_select_thread_ilocked(proc);
2698
2699        if (thread)
2700                binder_enqueue_thread_work_ilocked(thread, &t->work);
2701        else if (!pending_async)
2702                binder_enqueue_work_ilocked(&t->work, &proc->todo);
2703        else
2704                binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2705
2706        if (!pending_async)
2707                binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2708
2709        binder_inner_proc_unlock(proc);
2710        binder_node_unlock(node);
2711
2712        return true;
2713}
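
/*
 * Queueing decision above, summarized (illustrative):
 *
 *	explicit @thread given        -> thread->todo + wakeup
 *	oneway, node already busy     -> node->async_todo, no wakeup
 *	idle thread available         -> thread->todo + wakeup
 *	otherwise                     -> proc->todo + wakeup
 */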
2714
2715/**
2716 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2717 * @node:         struct binder_node for which to get refs
2718 * @procp:        returns @node->proc if valid
2719 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2720 *
2721 * User-space normally keeps the node alive when creating a transaction
2722 * since it has a reference to the target. The local strong ref keeps it
2723 * alive if the sending process dies before the target process processes
2724 * the transaction. If the source process is malicious or has a reference
2725 * counting bug, relying on the local strong ref can fail.
2726 *
2727 * Since user-space can cause the local strong ref to go away, we also take
2728 * a tmpref on the node to ensure it survives while we are constructing
2729 * the transaction. We also need a tmpref on the proc while we are
2730 * constructing the transaction, so we take that here as well.
2731 *
2732 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2733 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2734 * target proc has died, @error is set to BR_DEAD_REPLY.
2735 */
2736static struct binder_node *binder_get_node_refs_for_txn(
2737                struct binder_node *node,
2738                struct binder_proc **procp,
2739                uint32_t *error)
2740{
2741        struct binder_node *target_node = NULL;
2742
2743        binder_node_inner_lock(node);
2744        if (node->proc) {
2745                target_node = node;
2746                binder_inc_node_nilocked(node, 1, 0, NULL);
2747                binder_inc_node_tmpref_ilocked(node);
2748                node->proc->tmp_ref++;
2749                *procp = node->proc;
2750        } else
2751                *error = BR_DEAD_REPLY;
2752        binder_node_inner_unlock(node);
2753
2754        return target_node;
2755}
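
/*
 * Editor's note: a hedged sketch of the caller contract for the helper
 * above, mirroring the error path of binder_transaction() below. The
 * fragment is illustrative only; all symbols are real driver names.
 */
#if 0	/* illustrative fragment, not built */
	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
						   &return_error);
	if (!target_node)
		goto err_dead_binder;	/* @error was set to BR_DEAD_REPLY */
	/*
	 * on a failed transaction, all three refs taken by the helper
	 * must be dropped again:
	 */
	binder_proc_dec_tmpref(target_proc);	/* undoes proc->tmp_ref++ */
	binder_dec_node(target_node, 1, 0);	/* undoes binder_inc_node_nilocked() */
	binder_dec_node_tmpref(target_node);	/* undoes binder_inc_node_tmpref_ilocked() */
#endif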
2756
2757static void binder_transaction(struct binder_proc *proc,
2758                               struct binder_thread *thread,
2759                               struct binder_transaction_data *tr, int reply,
2760                               binder_size_t extra_buffers_size)
2761{
2762        int ret;
2763        struct binder_transaction *t;
2764        struct binder_work *w;
2765        struct binder_work *tcomplete;
2766        binder_size_t *offp, *off_end, *off_start;
2767        binder_size_t off_min;
2768        u8 *sg_bufp, *sg_buf_end;
2769        struct binder_proc *target_proc = NULL;
2770        struct binder_thread *target_thread = NULL;
2771        struct binder_node *target_node = NULL;
2772        struct binder_transaction *in_reply_to = NULL;
2773        struct binder_transaction_log_entry *e;
2774        uint32_t return_error = 0;
2775        uint32_t return_error_param = 0;
2776        uint32_t return_error_line = 0;
2777        struct binder_buffer_object *last_fixup_obj = NULL;
2778        binder_size_t last_fixup_min_off = 0;
2779        struct binder_context *context = proc->context;
2780        int t_debug_id = atomic_inc_return(&binder_last_id);
2781
2782        e = binder_transaction_log_add(&binder_transaction_log);
2783        e->debug_id = t_debug_id;
2784        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2785        e->from_proc = proc->pid;
2786        e->from_thread = thread->pid;
2787        e->target_handle = tr->target.handle;
2788        e->data_size = tr->data_size;
2789        e->offsets_size = tr->offsets_size;
2790        e->context_name = proc->context->name;
2791
2792        if (reply) {
2793                binder_inner_proc_lock(proc);
2794                in_reply_to = thread->transaction_stack;
2795                if (in_reply_to == NULL) {
2796                        binder_inner_proc_unlock(proc);
2797                        binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2798                                          proc->pid, thread->pid);
2799                        return_error = BR_FAILED_REPLY;
2800                        return_error_param = -EPROTO;
2801                        return_error_line = __LINE__;
2802                        goto err_empty_call_stack;
2803                }
2804                if (in_reply_to->to_thread != thread) {
2805                        spin_lock(&in_reply_to->lock);
2806                        binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2807                                proc->pid, thread->pid, in_reply_to->debug_id,
2808                                in_reply_to->to_proc ?
2809                                in_reply_to->to_proc->pid : 0,
2810                                in_reply_to->to_thread ?
2811                                in_reply_to->to_thread->pid : 0);
2812                        spin_unlock(&in_reply_to->lock);
2813                        binder_inner_proc_unlock(proc);
2814                        return_error = BR_FAILED_REPLY;
2815                        return_error_param = -EPROTO;
2816                        return_error_line = __LINE__;
2817                        in_reply_to = NULL;
2818                        goto err_bad_call_stack;
2819                }
2820                thread->transaction_stack = in_reply_to->to_parent;
2821                binder_inner_proc_unlock(proc);
2822                binder_set_nice(in_reply_to->saved_priority);
2823                target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2824                if (target_thread == NULL) {
2825                        /* annotation for sparse */
2826                        __release(&target_thread->proc->inner_lock);
2827                        return_error = BR_DEAD_REPLY;
2828                        return_error_line = __LINE__;
2829                        goto err_dead_binder;
2830                }
2831                if (target_thread->transaction_stack != in_reply_to) {
2832                        binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2833                                proc->pid, thread->pid,
2834                                target_thread->transaction_stack ?
2835                                target_thread->transaction_stack->debug_id : 0,
2836                                in_reply_to->debug_id);
2837                        binder_inner_proc_unlock(target_thread->proc);
2838                        return_error = BR_FAILED_REPLY;
2839                        return_error_param = -EPROTO;
2840                        return_error_line = __LINE__;
2841                        in_reply_to = NULL;
2842                        target_thread = NULL;
2843                        goto err_dead_binder;
2844                }
2845                target_proc = target_thread->proc;
2846                target_proc->tmp_ref++;
2847                binder_inner_proc_unlock(target_thread->proc);
2848        } else {
2849                if (tr->target.handle) {
2850                        struct binder_ref *ref;
2851
2852                         /*
2853                         * There must already be a strong ref
2854                         * on this node; do a strong
2855                         * increment on the node to ensure it
2856                         * stays alive until the transaction is
2857                         * done.
2858                         */
2859                        binder_proc_lock(proc);
2860                        ref = binder_get_ref_olocked(proc, tr->target.handle,
2861                                                     true);
2862                        if (ref) {
2863                                target_node = binder_get_node_refs_for_txn(
2864                                                ref->node, &target_proc,
2865                                                &return_error);
2866                        } else {
2867                                binder_user_error("%d:%d got transaction to invalid handle\n",
2868                                                  proc->pid, thread->pid);
2869                                return_error = BR_FAILED_REPLY;
2870                        }
2871                        binder_proc_unlock(proc);
2872                } else {
2873                        mutex_lock(&context->context_mgr_node_lock);
2874                        target_node = context->binder_context_mgr_node;
2875                        if (target_node)
2876                                target_node = binder_get_node_refs_for_txn(
2877                                                target_node, &target_proc,
2878                                                &return_error);
2879                        else
2880                                return_error = BR_DEAD_REPLY;
2881                        mutex_unlock(&context->context_mgr_node_lock);
2882                        if (target_node && target_proc == proc) {
2883                                binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2884                                                  proc->pid, thread->pid);
2885                                return_error = BR_FAILED_REPLY;
2886                                return_error_param = -EINVAL;
2887                                return_error_line = __LINE__;
2888                                goto err_invalid_target_handle;
2889                        }
2890                }
2891                if (!target_node) {
2892                        /*
2893                         * return_error is set above
2894                         */
2895                        return_error_param = -EINVAL;
2896                        return_error_line = __LINE__;
2897                        goto err_dead_binder;
2898                }
2899                e->to_node = target_node->debug_id;
2900                if (security_binder_transaction(proc->tsk,
2901                                                target_proc->tsk) < 0) {
2902                        return_error = BR_FAILED_REPLY;
2903                        return_error_param = -EPERM;
2904                        return_error_line = __LINE__;
2905                        goto err_invalid_target_handle;
2906                }
2907                binder_inner_proc_lock(proc);
2908
2909                w = list_first_entry_or_null(&thread->todo,
2910                                             struct binder_work, entry);
2911                if (!(tr->flags & TF_ONE_WAY) && w &&
2912                    w->type == BINDER_WORK_TRANSACTION) {
2913                        /*
2914                         * Do not allow new outgoing transaction from a
2915                         * thread that has a transaction at the head of
2916                         * its todo list. Only need to check the head
2917                         * because binder_select_thread_ilocked picks a
2918                         * thread from proc->waiting_threads to enqueue
2919                         * the transaction, and nothing is queued to the
2920                         * todo list while the thread is on waiting_threads.
2921                         */
2922                        binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2923                                          proc->pid, thread->pid);
2924                        binder_inner_proc_unlock(proc);
2925                        return_error = BR_FAILED_REPLY;
2926                        return_error_param = -EPROTO;
2927                        return_error_line = __LINE__;
2928                        goto err_bad_todo_list;
2929                }
2930
2931                if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2932                        struct binder_transaction *tmp;
2933
2934                        tmp = thread->transaction_stack;
2935                        if (tmp->to_thread != thread) {
2936                                spin_lock(&tmp->lock);
2937                                binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2938                                        proc->pid, thread->pid, tmp->debug_id,
2939                                        tmp->to_proc ? tmp->to_proc->pid : 0,
2940                                        tmp->to_thread ?
2941                                        tmp->to_thread->pid : 0);
2942                                spin_unlock(&tmp->lock);
2943                                binder_inner_proc_unlock(proc);
2944                                return_error = BR_FAILED_REPLY;
2945                                return_error_param = -EPROTO;
2946                                return_error_line = __LINE__;
2947                                goto err_bad_call_stack;
2948                        }
2949                        while (tmp) {
2950                                struct binder_thread *from;
2951
2952                                spin_lock(&tmp->lock);
2953                                from = tmp->from;
2954                                if (from && from->proc == target_proc) {
2955                                        atomic_inc(&from->tmp_ref);
2956                                        target_thread = from;
2957                                        spin_unlock(&tmp->lock);
2958                                        break;
2959                                }
2960                                spin_unlock(&tmp->lock);
2961                                tmp = tmp->from_parent;
2962                        }
2963                }
2964                binder_inner_proc_unlock(proc);
2965        }
2966        if (target_thread)
2967                e->to_thread = target_thread->pid;
2968        e->to_proc = target_proc->pid;
2969
2970        /* TODO: reuse incoming transaction for reply */
2971        t = kzalloc(sizeof(*t), GFP_KERNEL);
2972        if (t == NULL) {
2973                return_error = BR_FAILED_REPLY;
2974                return_error_param = -ENOMEM;
2975                return_error_line = __LINE__;
2976                goto err_alloc_t_failed;
2977        }
2978        INIT_LIST_HEAD(&t->fd_fixups);
2979        binder_stats_created(BINDER_STAT_TRANSACTION);
2980        spin_lock_init(&t->lock);
2981
2982        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2983        if (tcomplete == NULL) {
2984                return_error = BR_FAILED_REPLY;
2985                return_error_param = -ENOMEM;
2986                return_error_line = __LINE__;
2987                goto err_alloc_tcomplete_failed;
2988        }
2989        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2990
2991        t->debug_id = t_debug_id;
2992
2993        if (reply)
2994                binder_debug(BINDER_DEBUG_TRANSACTION,
2995                             "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2996                             proc->pid, thread->pid, t->debug_id,
2997                             target_proc->pid, target_thread->pid,
2998                             (u64)tr->data.ptr.buffer,
2999                             (u64)tr->data.ptr.offsets,
3000                             (u64)tr->data_size, (u64)tr->offsets_size,
3001                             (u64)extra_buffers_size);
3002        else
3003                binder_debug(BINDER_DEBUG_TRANSACTION,
3004                             "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3005                             proc->pid, thread->pid, t->debug_id,
3006                             target_proc->pid, target_node->debug_id,
3007                             (u64)tr->data.ptr.buffer,
3008                             (u64)tr->data.ptr.offsets,
3009                             (u64)tr->data_size, (u64)tr->offsets_size,
3010                             (u64)extra_buffers_size);
3011
3012        if (!reply && !(tr->flags & TF_ONE_WAY))
3013                t->from = thread;
3014        else
3015                t->from = NULL;
3016        t->sender_euid = task_euid(proc->tsk);
3017        t->to_proc = target_proc;
3018        t->to_thread = target_thread;
3019        t->code = tr->code;
3020        t->flags = tr->flags;
3021        t->priority = task_nice(current);
3022
3023        trace_binder_transaction(reply, t, target_node);
3024
3025        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3026                tr->offsets_size, extra_buffers_size,
3027                !reply && (t->flags & TF_ONE_WAY));
3028        if (IS_ERR(t->buffer)) {
3029                /*
3030                 * -ESRCH indicates VMA cleared. The target is dying.
3031                 */
3032                return_error_param = PTR_ERR(t->buffer);
3033                return_error = return_error_param == -ESRCH ?
3034                        BR_DEAD_REPLY : BR_FAILED_REPLY;
3035                return_error_line = __LINE__;
3036                t->buffer = NULL;
3037                goto err_binder_alloc_buf_failed;
3038        }
3039        t->buffer->debug_id = t->debug_id;
3040        t->buffer->transaction = t;
3041        t->buffer->target_node = target_node;
3042        trace_binder_transaction_alloc_buf(t->buffer);
3043        off_start = (binder_size_t *)(t->buffer->data +
3044                                      ALIGN(tr->data_size, sizeof(void *)));
3045        offp = off_start;
3046
3047        if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3048                           tr->data.ptr.buffer, tr->data_size)) {
3049                binder_user_error("%d:%d got transaction with invalid data ptr\n",
3050                                proc->pid, thread->pid);
3051                return_error = BR_FAILED_REPLY;
3052                return_error_param = -EFAULT;
3053                return_error_line = __LINE__;
3054                goto err_copy_data_failed;
3055        }
3056        if (copy_from_user(offp, (const void __user *)(uintptr_t)
3057                           tr->data.ptr.offsets, tr->offsets_size)) {
3058                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3059                                proc->pid, thread->pid);
3060                return_error = BR_FAILED_REPLY;
3061                return_error_param = -EFAULT;
3062                return_error_line = __LINE__;
3063                goto err_copy_data_failed;
3064        }
3065        if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3066                binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3067                                proc->pid, thread->pid, (u64)tr->offsets_size);
3068                return_error = BR_FAILED_REPLY;
3069                return_error_param = -EINVAL;
3070                return_error_line = __LINE__;
3071                goto err_bad_offset;
3072        }
3073        if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3074                binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3075                                  proc->pid, thread->pid,
3076                                  (u64)extra_buffers_size);
3077                return_error = BR_FAILED_REPLY;
3078                return_error_param = -EINVAL;
3079                return_error_line = __LINE__;
3080                goto err_bad_offset;
3081        }
3082        off_end = (void *)off_start + tr->offsets_size;
3083        sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3084        sg_buf_end = sg_bufp + extra_buffers_size;
3085        off_min = 0;
3086        for (; offp < off_end; offp++) {
3087                struct binder_object_header *hdr;
3088                size_t object_size = binder_validate_object(t->buffer, *offp);
3089
3090                if (object_size == 0 || *offp < off_min) {
3091                        binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3092                                          proc->pid, thread->pid, (u64)*offp,
3093                                          (u64)off_min,
3094                                          (u64)t->buffer->data_size);
3095                        return_error = BR_FAILED_REPLY;
3096                        return_error_param = -EINVAL;
3097                        return_error_line = __LINE__;
3098                        goto err_bad_offset;
3099                }
3100
3101                hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3102                off_min = *offp + object_size;
3103                switch (hdr->type) {
3104                case BINDER_TYPE_BINDER:
3105                case BINDER_TYPE_WEAK_BINDER: {
3106                        struct flat_binder_object *fp;
3107
3108                        fp = to_flat_binder_object(hdr);
3109                        ret = binder_translate_binder(fp, t, thread);
3110                        if (ret < 0) {
3111                                return_error = BR_FAILED_REPLY;
3112                                return_error_param = ret;
3113                                return_error_line = __LINE__;
3114                                goto err_translate_failed;
3115                        }
3116                } break;
3117                case BINDER_TYPE_HANDLE:
3118                case BINDER_TYPE_WEAK_HANDLE: {
3119                        struct flat_binder_object *fp;
3120
3121                        fp = to_flat_binder_object(hdr);
3122                        ret = binder_translate_handle(fp, t, thread);
3123                        if (ret < 0) {
3124                                return_error = BR_FAILED_REPLY;
3125                                return_error_param = ret;
3126                                return_error_line = __LINE__;
3127                                goto err_translate_failed;
3128                        }
3129                } break;
3130
3131                case BINDER_TYPE_FD: {
3132                        struct binder_fd_object *fp = to_binder_fd_object(hdr);
3133                        int ret = binder_translate_fd(&fp->fd, t, thread,
3134                                                      in_reply_to);
3135
3136                        if (ret < 0) {
3137                                return_error = BR_FAILED_REPLY;
3138                                return_error_param = ret;
3139                                return_error_line = __LINE__;
3140                                goto err_translate_failed;
3141                        }
3142                        fp->pad_binder = 0;
3143                } break;
3144                case BINDER_TYPE_FDA: {
3145                        struct binder_fd_array_object *fda =
3146                                to_binder_fd_array_object(hdr);
3147                        struct binder_buffer_object *parent =
3148                                binder_validate_ptr(t->buffer, fda->parent,
3149                                                    off_start,
3150                                                    offp - off_start);
3151                        if (!parent) {
3152                                binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3153                                                  proc->pid, thread->pid);
3154                                return_error = BR_FAILED_REPLY;
3155                                return_error_param = -EINVAL;
3156                                return_error_line = __LINE__;
3157                                goto err_bad_parent;
3158                        }
3159                        if (!binder_validate_fixup(t->buffer, off_start,
3160                                                   parent, fda->parent_offset,
3161                                                   last_fixup_obj,
3162                                                   last_fixup_min_off)) {
3163                                binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3164                                                  proc->pid, thread->pid);
3165                                return_error = BR_FAILED_REPLY;
3166                                return_error_param = -EINVAL;
3167                                return_error_line = __LINE__;
3168                                goto err_bad_parent;
3169                        }
3170                        ret = binder_translate_fd_array(fda, parent, t, thread,
3171                                                        in_reply_to);
3172                        if (ret < 0) {
3173                                return_error = BR_FAILED_REPLY;
3174                                return_error_param = ret;
3175                                return_error_line = __LINE__;
3176                                goto err_translate_failed;
3177                        }
3178                        last_fixup_obj = parent;
3179                        last_fixup_min_off =
3180                                fda->parent_offset + sizeof(u32) * fda->num_fds;
3181                } break;
3182                case BINDER_TYPE_PTR: {
3183                        struct binder_buffer_object *bp =
3184                                to_binder_buffer_object(hdr);
3185                        size_t buf_left = sg_buf_end - sg_bufp;
3186
3187                        if (bp->length > buf_left) {
3188                                binder_user_error("%d:%d got transaction with too large buffer\n",
3189                                                  proc->pid, thread->pid);
3190                                return_error = BR_FAILED_REPLY;
3191                                return_error_param = -EINVAL;
3192                                return_error_line = __LINE__;
3193                                goto err_bad_offset;
3194                        }
3195                        if (copy_from_user(sg_bufp,
3196                                           (const void __user *)(uintptr_t)
3197                                           bp->buffer, bp->length)) {
3198                                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3199                                                  proc->pid, thread->pid);
3200                                return_error = BR_FAILED_REPLY;
3201                                return_error_param = -EFAULT;
3202                                return_error_line = __LINE__;
3203                                goto err_copy_data_failed;
3204                        }
3205                        /* Fixup buffer pointer to target proc address space */
3206                        bp->buffer = (uintptr_t)sg_bufp +
3207                                binder_alloc_get_user_buffer_offset(
3208                                                &target_proc->alloc);
3209                        sg_bufp += ALIGN(bp->length, sizeof(u64));
3210
3211                        ret = binder_fixup_parent(t, thread, bp, off_start,
3212                                                  offp - off_start,
3213                                                  last_fixup_obj,
3214                                                  last_fixup_min_off);
3215                        if (ret < 0) {
3216                                return_error = BR_FAILED_REPLY;
3217                                return_error_param = ret;
3218                                return_error_line = __LINE__;
3219                                goto err_translate_failed;
3220                        }
3221                        last_fixup_obj = bp;
3222                        last_fixup_min_off = 0;
3223                } break;
3224                default:
3225                        binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3226                                proc->pid, thread->pid, hdr->type);
3227                        return_error = BR_FAILED_REPLY;
3228                        return_error_param = -EINVAL;
3229                        return_error_line = __LINE__;
3230                        goto err_bad_object_type;
3231                }
3232        }
3233        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3234        t->work.type = BINDER_WORK_TRANSACTION;
3235
3236        if (reply) {
3237                binder_enqueue_thread_work(thread, tcomplete);
3238                binder_inner_proc_lock(target_proc);
3239                if (target_thread->is_dead) {
3240                        binder_inner_proc_unlock(target_proc);
3241                        goto err_dead_proc_or_thread;
3242                }
3243                BUG_ON(t->buffer->async_transaction != 0);
3244                binder_pop_transaction_ilocked(target_thread, in_reply_to);
3245                binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3246                binder_inner_proc_unlock(target_proc);
3247                wake_up_interruptible_sync(&target_thread->wait);
3248                binder_free_transaction(in_reply_to);
3249        } else if (!(t->flags & TF_ONE_WAY)) {
3250                BUG_ON(t->buffer->async_transaction != 0);
3251                binder_inner_proc_lock(proc);
3252                /*
3253                 * Defer the TRANSACTION_COMPLETE, so we don't return to
3254                 * userspace immediately; this allows the target process to
3255                 * immediately start processing this transaction, reducing
3256                 * latency. We will then return the TRANSACTION_COMPLETE when
3257                 * the target replies (or there is an error).
3258                 */
3259                binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3260                t->need_reply = 1;
3261                t->from_parent = thread->transaction_stack;
3262                thread->transaction_stack = t;
3263                binder_inner_proc_unlock(proc);
3264                if (!binder_proc_transaction(t, target_proc, target_thread)) {
3265                        binder_inner_proc_lock(proc);
3266                        binder_pop_transaction_ilocked(thread, t);
3267                        binder_inner_proc_unlock(proc);
3268                        goto err_dead_proc_or_thread;
3269                }
3270        } else {
3271                BUG_ON(target_node == NULL);
3272                BUG_ON(t->buffer->async_transaction != 1);
3273                binder_enqueue_thread_work(thread, tcomplete);
3274                if (!binder_proc_transaction(t, target_proc, NULL))
3275                        goto err_dead_proc_or_thread;
3276        }
3277        if (target_thread)
3278                binder_thread_dec_tmpref(target_thread);
3279        binder_proc_dec_tmpref(target_proc);
3280        if (target_node)
3281                binder_dec_node_tmpref(target_node);
3282        /*
3283         * write barrier to synchronize with initialization
3284         * of log entry
3285         */
3286        smp_wmb();
3287        WRITE_ONCE(e->debug_id_done, t_debug_id);
3288        return;
3289
3290err_dead_proc_or_thread:
3291        return_error = BR_DEAD_REPLY;
3292        return_error_line = __LINE__;
3293        binder_dequeue_work(proc, tcomplete);
3294err_translate_failed:
3295err_bad_object_type:
3296err_bad_offset:
3297err_bad_parent:
3298err_copy_data_failed:
3299        binder_free_txn_fixups(t);
3300        trace_binder_transaction_failed_buffer_release(t->buffer);
3301        binder_transaction_buffer_release(target_proc, t->buffer, offp);
3302        if (target_node)
3303                binder_dec_node_tmpref(target_node);
3304        target_node = NULL;
3305        t->buffer->transaction = NULL;
3306        binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3307err_binder_alloc_buf_failed:
3308        kfree(tcomplete);
3309        binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3310err_alloc_tcomplete_failed:
3311        kfree(t);
3312        binder_stats_deleted(BINDER_STAT_TRANSACTION);
3313err_alloc_t_failed:
3314err_bad_todo_list:
3315err_bad_call_stack:
3316err_empty_call_stack:
3317err_dead_binder:
3318err_invalid_target_handle:
3319        if (target_thread)
3320                binder_thread_dec_tmpref(target_thread);
3321        if (target_proc)
3322                binder_proc_dec_tmpref(target_proc);
3323        if (target_node) {
3324                binder_dec_node(target_node, 1, 0);
3325                binder_dec_node_tmpref(target_node);
3326        }
3327
3328        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3329                     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3330                     proc->pid, thread->pid, return_error, return_error_param,
3331                     (u64)tr->data_size, (u64)tr->offsets_size,
3332                     return_error_line);
3333
3334        {
3335                struct binder_transaction_log_entry *fe;
3336
3337                e->return_error = return_error;
3338                e->return_error_param = return_error_param;
3339                e->return_error_line = return_error_line;
3340                fe = binder_transaction_log_add(&binder_transaction_log_failed);
3341                *fe = *e;
3342                /*
3343                 * write barrier to synchronize with initialization
3344                 * of log entry
3345                 */
3346                smp_wmb();
3347                WRITE_ONCE(e->debug_id_done, t_debug_id);
3348                WRITE_ONCE(fe->debug_id_done, t_debug_id);
3349        }
3350
3351        BUG_ON(thread->return_error.cmd != BR_OK);
3352        if (in_reply_to) {
3353                thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3354                binder_enqueue_thread_work(thread, &thread->return_error.work);
3355                binder_send_failed_reply(in_reply_to, return_error);
3356        } else {
3357                thread->return_error.cmd = return_error;
3358                binder_enqueue_thread_work(thread, &thread->return_error.work);
3359        }
3360}
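
/*
 * Editor's note: a hedged userspace-side sketch of how a transaction
 * reaches binder_transaction() via the BINDER_WRITE_READ ioctl that
 * binder_thread_write() below dispatches on. The helper name and the
 * already-open binder_fd are hypothetical; the structs and commands
 * are the uapi ones from <linux/android/binder.h>.
 */
#if 0	/* illustrative userspace sketch, not built with the driver */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_empty_transaction(int binder_fd, __u32 handle, __u32 code)
{
	struct {
		__u32 cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_TRANSACTION,
		.tr = {
			.target.handle = handle, /* resolved via the proc's ref tree */
			.code = code,
			.flags = 0,		/* synchronous: TF_ONE_WAY clear */
			.data_size = 0,		/* no payload */
			.offsets_size = 0,	/* no binder objects */
		},
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(writebuf),
		.write_buffer = (binder_uintptr_t)(uintptr_t)&writebuf,
	};

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif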
3361
3362/**
3363 * binder_free_buf() - free the specified buffer
3364 * @proc:       binder proc that owns buffer
3365 * @buffer:     buffer to be freed
3366 *
3367 * If the buffer is for an async transaction, enqueue the next async
3368 * transaction from the node.
3369 *
3370 * Clean up the buffer and free it.
3371 */
3372static void
3373binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3374{
3375        if (buffer->transaction) {
3376                buffer->transaction->buffer = NULL;
3377                buffer->transaction = NULL;
3378        }
3379        if (buffer->async_transaction && buffer->target_node) {
3380                struct binder_node *buf_node;
3381                struct binder_work *w;
3382
3383                buf_node = buffer->target_node;
3384                binder_node_inner_lock(buf_node);
3385                BUG_ON(!buf_node->has_async_transaction);
3386                BUG_ON(buf_node->proc != proc);
3387                w = binder_dequeue_work_head_ilocked(
3388                                &buf_node->async_todo);
3389                if (!w) {
3390                        buf_node->has_async_transaction = false;
3391                } else {
3392                        binder_enqueue_work_ilocked(
3393                                        w, &proc->todo);
3394                        binder_wakeup_proc_ilocked(proc);
3395                }
3396                binder_node_inner_unlock(buf_node);
3397        }
3398        trace_binder_transaction_buffer_release(buffer);
3399        binder_transaction_buffer_release(proc, buffer, NULL);
3400        binder_alloc_free_buf(&proc->alloc, buffer);
3401}
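
/*
 * Editor's note: the BC_FREE_BUFFER case in binder_thread_write() below
 * is the userspace-facing side of this function. A hedged sketch of
 * returning a received buffer, where tr is the binder_transaction_data
 * that arrived with BR_TRANSACTION (names are hypothetical):
 */
#if 0	/* illustrative userspace fragment, not built with the driver */
	struct {
		__u32 cmd;
		binder_uintptr_t data_ptr;
	} __attribute__((packed)) freebuf = {
		.cmd = BC_FREE_BUFFER,
		.data_ptr = tr.data.ptr.buffer,	/* kernel-assigned address */
	};
	/* written through BINDER_WRITE_READ like any other BC_* command */
#endif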
3402
3403static int binder_thread_write(struct binder_proc *proc,
3404                        struct binder_thread *thread,
3405                        binder_uintptr_t binder_buffer, size_t size,
3406                        binder_size_t *consumed)
3407{
3408        uint32_t cmd;
3409        struct binder_context *context = proc->context;
3410        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3411        void __user *ptr = buffer + *consumed;
3412        void __user *end = buffer + size;
3413
3414        while (ptr < end && thread->return_error.cmd == BR_OK) {
3415                int ret;
3416
3417                if (get_user(cmd, (uint32_t __user *)ptr))
3418                        return -EFAULT;
3419                ptr += sizeof(uint32_t);
3420                trace_binder_command(cmd);
3421                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3422                        atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3423                        atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3424                        atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3425                }
3426                switch (cmd) {
3427                case BC_INCREFS:
3428                case BC_ACQUIRE:
3429                case BC_RELEASE:
3430                case BC_DECREFS: {
3431                        uint32_t target;
3432                        const char *debug_string;
3433                        bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3434                        bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3435                        struct binder_ref_data rdata;
3436
3437                        if (get_user(target, (uint32_t __user *)ptr))
3438                                return -EFAULT;
3439
3440                        ptr += sizeof(uint32_t);
3441                        ret = -1;
3442                        if (increment && !target) {
3443                                struct binder_node *ctx_mgr_node;

3444                                mutex_lock(&context->context_mgr_node_lock);
3445                                ctx_mgr_node = context->binder_context_mgr_node;
3446                                if (ctx_mgr_node)
3447                                        ret = binder_inc_ref_for_node(
3448                                                        proc, ctx_mgr_node,
3449                                                        strong, NULL, &rdata);
3450                                mutex_unlock(&context->context_mgr_node_lock);
3451                        }
3452                        if (ret)
3453                                ret = binder_update_ref_for_handle(
3454                                                proc, target, increment, strong,
3455                                                &rdata);
3456                        if (!ret && rdata.desc != target) {
3457                                binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3458                                        proc->pid, thread->pid,
3459                                        target, rdata.desc);
3460                        }
3461                        switch (cmd) {
3462                        case BC_INCREFS:
3463                                debug_string = "IncRefs";
3464                                break;
3465                        case BC_ACQUIRE:
3466                                debug_string = "Acquire";
3467                                break;
3468                        case BC_RELEASE:
3469                                debug_string = "Release";
3470                                break;
3471                        case BC_DECREFS:
3472                        default:
3473                                debug_string = "DecRefs";
3474                                break;
3475                        }
3476                        if (ret) {
3477                                binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3478                                        proc->pid, thread->pid, debug_string,
3479                                        strong, target, ret);
3480                                break;
3481                        }
3482                        binder_debug(BINDER_DEBUG_USER_REFS,
3483                                     "%d:%d %s ref %d desc %d s %d w %d\n",
3484                                     proc->pid, thread->pid, debug_string,
3485                                     rdata.debug_id, rdata.desc, rdata.strong,
3486                                     rdata.weak);
3487                        break;
3488                }
3489                case BC_INCREFS_DONE:
3490                case BC_ACQUIRE_DONE: {
3491                        binder_uintptr_t node_ptr;
3492                        binder_uintptr_t cookie;
3493                        struct binder_node *node;
3494                        bool free_node;
3495
3496                        if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3497                                return -EFAULT;
3498                        ptr += sizeof(binder_uintptr_t);
3499                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3500                                return -EFAULT;
3501                        ptr += sizeof(binder_uintptr_t);
3502                        node = binder_get_node(proc, node_ptr);
3503                        if (node == NULL) {
3504                                binder_user_error("%d:%d %s u%016llx no match\n",
3505                                        proc->pid, thread->pid,
3506                                        cmd == BC_INCREFS_DONE ?
3507                                        "BC_INCREFS_DONE" :
3508                                        "BC_ACQUIRE_DONE",
3509                                        (u64)node_ptr);
3510                                break;
3511                        }
3512                        if (cookie != node->cookie) {
3513                                binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3514                                        proc->pid, thread->pid,
3515                                        cmd == BC_INCREFS_DONE ?
3516                                        "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3517                                        (u64)node_ptr, node->debug_id,
3518                                        (u64)cookie, (u64)node->cookie);
3519                                binder_put_node(node);
3520                                break;
3521                        }
3522                        binder_node_inner_lock(node);
3523                        if (cmd == BC_ACQUIRE_DONE) {
3524                                if (node->pending_strong_ref == 0) {
3525                                        binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3526                                                proc->pid, thread->pid,
3527                                                node->debug_id);
3528                                        binder_node_inner_unlock(node);
3529                                        binder_put_node(node);
3530                                        break;
3531                                }
3532                                node->pending_strong_ref = 0;
3533                        } else {
3534                                if (node->pending_weak_ref == 0) {
3535                                        binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3536                                                proc->pid, thread->pid,
3537                                                node->debug_id);
3538                                        binder_node_inner_unlock(node);
3539                                        binder_put_node(node);
3540                                        break;
3541                                }
3542                                node->pending_weak_ref = 0;
3543                        }
3544                        free_node = binder_dec_node_nilocked(node,
3545                                        cmd == BC_ACQUIRE_DONE, 0);
3546                        WARN_ON(free_node);
3547                        binder_debug(BINDER_DEBUG_USER_REFS,
3548                                     "%d:%d %s node %d ls %d lw %d tr %d\n",
3549                                     proc->pid, thread->pid,
3550                                     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3551                                     node->debug_id, node->local_strong_refs,
3552                                     node->local_weak_refs, node->tmp_refs);
3553                        binder_node_inner_unlock(node);
3554                        binder_put_node(node);
3555                        break;
3556                }
3557                case BC_ATTEMPT_ACQUIRE:
3558                        pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3559                        return -EINVAL;
3560                case BC_ACQUIRE_RESULT:
3561                        pr_err("BC_ACQUIRE_RESULT not supported\n");
3562                        return -EINVAL;
3563
3564                case BC_FREE_BUFFER: {
3565                        binder_uintptr_t data_ptr;
3566                        struct binder_buffer *buffer;
3567
3568                        if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3569                                return -EFAULT;
3570                        ptr += sizeof(binder_uintptr_t);
3571
3572                        buffer = binder_alloc_prepare_to_free(&proc->alloc,
3573                                                              data_ptr);
3574                        if (IS_ERR_OR_NULL(buffer)) {
3575                                if (PTR_ERR(buffer) == -EPERM) {
3576                                        binder_user_error(
3577                                                "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3578                                                proc->pid, thread->pid,
3579                                                (u64)data_ptr);
3580                                } else {
3581                                        binder_user_error(
3582                                                "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3583                                                proc->pid, thread->pid,
3584                                                (u64)data_ptr);
3585                                }
3586                                break;
3587                        }
3588                        binder_debug(BINDER_DEBUG_FREE_BUFFER,
3589                                     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3590                                     proc->pid, thread->pid, (u64)data_ptr,
3591                                     buffer->debug_id,
3592                                     buffer->transaction ? "active" : "finished");
3593                        binder_free_buf(proc, buffer);
3594                        break;
3595                }
3596
3597                case BC_TRANSACTION_SG:
3598                case BC_REPLY_SG: {
3599                        struct binder_transaction_data_sg tr;
3600
3601                        if (copy_from_user(&tr, ptr, sizeof(tr)))
3602                                return -EFAULT;
3603                        ptr += sizeof(tr);
3604                        binder_transaction(proc, thread, &tr.transaction_data,
3605                                           cmd == BC_REPLY_SG, tr.buffers_size);
3606                        break;
3607                }
3608                case BC_TRANSACTION:
3609                case BC_REPLY: {
3610                        struct binder_transaction_data tr;
3611
3612                        if (copy_from_user(&tr, ptr, sizeof(tr)))
3613                                return -EFAULT;
3614                        ptr += sizeof(tr);
3615                        binder_transaction(proc, thread, &tr,
3616                                           cmd == BC_REPLY, 0);
3617                        break;
3618                }
3619
3620                case BC_REGISTER_LOOPER:
3621                        binder_debug(BINDER_DEBUG_THREADS,
3622                                     "%d:%d BC_REGISTER_LOOPER\n",
3623                                     proc->pid, thread->pid);
3624                        binder_inner_proc_lock(proc);
3625                        if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3626                                thread->looper |= BINDER_LOOPER_STATE_INVALID;
3627                                binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3628                                        proc->pid, thread->pid);
3629                        } else if (proc->requested_threads == 0) {
3630                                thread->looper |= BINDER_LOOPER_STATE_INVALID;
3631                                binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3632                                        proc->pid, thread->pid);
3633                        } else {
3634                                proc->requested_threads--;
3635                                proc->requested_threads_started++;
3636                        }
3637                        thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3638                        binder_inner_proc_unlock(proc);
3639                        break;
3640                case BC_ENTER_LOOPER:
3641                        binder_debug(BINDER_DEBUG_THREADS,
3642                                     "%d:%d BC_ENTER_LOOPER\n",
3643                                     proc->pid, thread->pid);
3644                        if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3645                                thread->looper |= BINDER_LOOPER_STATE_INVALID;
3646                                binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3647                                        proc->pid, thread->pid);
3648                        }
3649                        thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3650                        break;
3651                case BC_EXIT_LOOPER:
3652                        binder_debug(BINDER_DEBUG_THREADS,
3653                                     "%d:%d BC_EXIT_LOOPER\n",
3654                                     proc->pid, thread->pid);
3655                        thread->looper |= BINDER_LOOPER_STATE_EXITED;
3656                        break;
3657
3658                case BC_REQUEST_DEATH_NOTIFICATION:
3659                case BC_CLEAR_DEATH_NOTIFICATION: {
3660                        uint32_t target;
3661                        binder_uintptr_t cookie;
3662                        struct binder_ref *ref;
3663                        struct binder_ref_death *death = NULL;
3664
3665                        if (get_user(target, (uint32_t __user *)ptr))
3666                                return -EFAULT;
3667                        ptr += sizeof(uint32_t);
3668                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3669                                return -EFAULT;
3670                        ptr += sizeof(binder_uintptr_t);
3671                        if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3672                                /*
3673                                 * Allocate memory for death notification
3674                                 * before taking lock
3675                                 */
3676                                death = kzalloc(sizeof(*death), GFP_KERNEL);
3677                                if (death == NULL) {
3678                                        WARN_ON(thread->return_error.cmd !=
3679                                                BR_OK);
3680                                        thread->return_error.cmd = BR_ERROR;
3681                                        binder_enqueue_thread_work(
3682                                                thread,
3683                                                &thread->return_error.work);
3684                                        binder_debug(
3685                                                BINDER_DEBUG_FAILED_TRANSACTION,
3686                                                "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3687                                                proc->pid, thread->pid);
3688                                        break;
3689                                }
3690                        }
3691                        binder_proc_lock(proc);
3692                        ref = binder_get_ref_olocked(proc, target, false);
3693                        if (ref == NULL) {
3694                                binder_user_error("%d:%d %s invalid ref %d\n",
3695                                        proc->pid, thread->pid,
3696                                        cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3697                                        "BC_REQUEST_DEATH_NOTIFICATION" :
3698                                        "BC_CLEAR_DEATH_NOTIFICATION",
3699                                        target);
3700                                binder_proc_unlock(proc);
3701                                kfree(death);
3702                                break;
3703                        }
3704
3705                        binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3706                                     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3707                                     proc->pid, thread->pid,
3708                                     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3709                                     "BC_REQUEST_DEATH_NOTIFICATION" :
3710                                     "BC_CLEAR_DEATH_NOTIFICATION",
3711                                     (u64)cookie, ref->data.debug_id,
3712                                     ref->data.desc, ref->data.strong,
3713                                     ref->data.weak, ref->node->debug_id);
3714
3715                        binder_node_lock(ref->node);
3716                        if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3717                                if (ref->death) {
3718                                        binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3719                                                proc->pid, thread->pid);
3720                                        binder_node_unlock(ref->node);
3721                                        binder_proc_unlock(proc);
3722                                        kfree(death);
3723                                        break;
3724                                }
3725                                binder_stats_created(BINDER_STAT_DEATH);
3726                                INIT_LIST_HEAD(&death->work.entry);
3727                                death->cookie = cookie;
3728                                ref->death = death;
3729                                if (ref->node->proc == NULL) {
3730                                        ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3731
3732                                        binder_inner_proc_lock(proc);
3733                                        binder_enqueue_work_ilocked(
3734                                                &ref->death->work, &proc->todo);
3735                                        binder_wakeup_proc_ilocked(proc);
3736                                        binder_inner_proc_unlock(proc);
3737                                }
3738                        } else {
3739                                if (ref->death == NULL) {
3740                                        binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3741                                                proc->pid, thread->pid);
3742                                        binder_node_unlock(ref->node);
3743                                        binder_proc_unlock(proc);
3744                                        break;
3745                                }
3746                                death = ref->death;
3747                                if (death->cookie != cookie) {
3748                                        binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3749                                                proc->pid, thread->pid,
3750                                                (u64)death->cookie,
3751                                                (u64)cookie);
3752                                        binder_node_unlock(ref->node);
3753                                        binder_proc_unlock(proc);
3754                                        break;
3755                                }
3756                                ref->death = NULL;
3757                                binder_inner_proc_lock(proc);
3758                                if (list_empty(&death->work.entry)) {
3759                                        death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3760                                        if (thread->looper &
3761                                            (BINDER_LOOPER_STATE_REGISTERED |
3762                                             BINDER_LOOPER_STATE_ENTERED))
3763                                                binder_enqueue_thread_work_ilocked(
3764                                                                thread,
3765                                                                &death->work);
3766                                        else {
3767                                                binder_enqueue_work_ilocked(
3768                                                                &death->work,
3769                                                                &proc->todo);
3770                                                binder_wakeup_proc_ilocked(
3771                                                                proc);
3772                                        }
3773                                } else {
3774                                        BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3775                                        death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3776                                }
3777                                binder_inner_proc_unlock(proc);
3778                        }
3779                        binder_node_unlock(ref->node);
3780                        binder_proc_unlock(proc);
3781                } break;
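                /*
                 * Clearing a death notification has two outcomes: if the
                 * BINDER_WORK_DEAD_BINDER work is still queued (the node
                 * died before the clear arrived), it is re-typed to
                 * BINDER_WORK_DEAD_BINDER_AND_CLEAR so user space sees
                 * both indications; otherwise a
                 * BINDER_WORK_CLEAR_DEATH_NOTIFICATION is queued and user
                 * space later receives BR_CLEAR_DEATH_NOTIFICATION_DONE.
                 */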
3782                case BC_DEAD_BINDER_DONE: {
3783                        struct binder_work *w;
3784                        binder_uintptr_t cookie;
3785                        struct binder_ref_death *death = NULL;
3786
3787                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3788                                return -EFAULT;
3789
3790                        ptr += sizeof(cookie);
3791                        binder_inner_proc_lock(proc);
3792                        list_for_each_entry(w, &proc->delivered_death,
3793                                            entry) {
3794                                struct binder_ref_death *tmp_death =
3795                                        container_of(w,
3796                                                     struct binder_ref_death,
3797                                                     work);
3798
3799                                if (tmp_death->cookie == cookie) {
3800                                        death = tmp_death;
3801                                        break;
3802                                }
3803                        }
3804                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
3805                                     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3806                                     proc->pid, thread->pid, (u64)cookie,
3807                                     death);
3808                        if (death == NULL) {
3809                                binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3810                                        proc->pid, thread->pid, (u64)cookie);
3811                                binder_inner_proc_unlock(proc);
3812                                break;
3813                        }
3814                        binder_dequeue_work_ilocked(&death->work);
3815                        if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3816                                death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3817                                if (thread->looper &
3818                                        (BINDER_LOOPER_STATE_REGISTERED |
3819                                         BINDER_LOOPER_STATE_ENTERED))
3820                                        binder_enqueue_thread_work_ilocked(
3821                                                thread, &death->work);
3822                                else {
3823                                        binder_enqueue_work_ilocked(
3824                                                        &death->work,
3825                                                        &proc->todo);
3826                                        binder_wakeup_proc_ilocked(proc);
3827                                }
3828                        }
3829                        binder_inner_proc_unlock(proc);
3830                } break;
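                /*
                 * BC_DEAD_BINDER_DONE completes the death-notification
                 * handshake: BR_DEAD_BINDER parks the work on
                 * proc->delivered_death until user space acknowledges it
                 * with the matching cookie. A plausible user-space side,
                 * for illustration only:
                 *
                 *      uint32_t cmd = BC_DEAD_BINDER_DONE;
                 *      binder_uintptr_t cookie = dead_cookie;
                 *      // append cmd, then cookie, to the write buffer
                 *      // handed to BINDER_WRITE_READ
                 */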
3831
3832                default:
3833                        pr_err("%d:%d unknown command %d\n",
3834                               proc->pid, thread->pid, cmd);
3835                        return -EINVAL;
3836                }
3837                *consumed = ptr - buffer;
3838        }
3839        return 0;
3840}
3841
3842static void binder_stat_br(struct binder_proc *proc,
3843                           struct binder_thread *thread, uint32_t cmd)
3844{
3845        trace_binder_return(cmd);
3846        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3847                atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3848                atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3849                atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3850        }
3851}
3852
3853static int binder_put_node_cmd(struct binder_proc *proc,
3854                               struct binder_thread *thread,
3855                               void __user **ptrp,
3856                               binder_uintptr_t node_ptr,
3857                               binder_uintptr_t node_cookie,
3858                               int node_debug_id,
3859                               uint32_t cmd, const char *cmd_name)
3860{
3861        void __user *ptr = *ptrp;
3862
3863        if (put_user(cmd, (uint32_t __user *)ptr))
3864                return -EFAULT;
3865        ptr += sizeof(uint32_t);
3866
3867        if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3868                return -EFAULT;
3869        ptr += sizeof(binder_uintptr_t);
3870
3871        if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3872                return -EFAULT;
3873        ptr += sizeof(binder_uintptr_t);
3874
3875        binder_stat_br(proc, thread, cmd);
3876        binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3877                     proc->pid, thread->pid, cmd_name, node_debug_id,
3878                     (u64)node_ptr, (u64)node_cookie);
3879
3880        *ptrp = ptr;
3881        return 0;
3882}
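/*
 * Each node command written by binder_put_node_cmd() has the fixed
 * user-visible layout { u32 cmd; binder_uintptr_t ptr; binder_uintptr_t
 * cookie; }, i.e. a command word followed by the equivalent of struct
 * binder_ptr_cookie from the UAPI header.
 */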
3883
3884static int binder_wait_for_work(struct binder_thread *thread,
3885                                bool do_proc_work)
3886{
3887        DEFINE_WAIT(wait);
3888        struct binder_proc *proc = thread->proc;
3889        int ret = 0;
3890
3891        freezer_do_not_count();
3892        binder_inner_proc_lock(proc);
3893        for (;;) {
3894                prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3895                if (binder_has_work_ilocked(thread, do_proc_work))
3896                        break;
3897                if (do_proc_work)
3898                        list_add(&thread->waiting_thread_node,
3899                                 &proc->waiting_threads);
3900                binder_inner_proc_unlock(proc);
3901                schedule();
3902                binder_inner_proc_lock(proc);
3903                list_del_init(&thread->waiting_thread_node);
3904                if (signal_pending(current)) {
3905                        ret = -ERESTARTSYS;
3906                        break;
3907                }
3908        }
3909        finish_wait(&thread->wait, &wait);
3910        binder_inner_proc_unlock(proc);
3911        freezer_count();
3912
3913        return ret;
3914}
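/*
 * Two details of binder_wait_for_work() are worth noting: the thread is
 * put on proc->waiting_threads only when it is willing to take
 * process-wide work, and the freezer_do_not_count()/freezer_count()
 * pair keeps the freezer from stalling on a thread that may
 * legitimately block here for a long time.
 */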
3915
3916/**
3917 * binder_apply_fd_fixups() - finish fd translation
3918 * @t:  binder transaction with list of fd fixups
3919 *
3920 * Now that we are in the context of the transaction target
3921 * process, we can allocate and install fds. Process the
3922 * list of fds to translate and fixup the buffer with the
3923 * new fds.
3924 *
3925 * If we fail to allocate an fd, then free the resources by
3926 * fput'ing files that have not been processed and ksys_close'ing
3927 * any fds that have already been allocated.
3928 */
3929static int binder_apply_fd_fixups(struct binder_transaction *t)
3930{
3931        struct binder_txn_fd_fixup *fixup, *tmp;
3932        int ret = 0;
3933
3934        list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3935                int fd = get_unused_fd_flags(O_CLOEXEC);
3936                u32 *fdp;
3937
3938                if (fd < 0) {
3939                        binder_debug(BINDER_DEBUG_TRANSACTION,
3940                                     "failed fd fixup txn %d fd %d\n",
3941                                     t->debug_id, fd);
3942                        ret = -ENOMEM;
3943                        break;
3944                }
3945                binder_debug(BINDER_DEBUG_TRANSACTION,
3946                             "fd fixup txn %d fd %d\n",
3947                             t->debug_id, fd);
3948                trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3949                fd_install(fd, fixup->file);
3950                fixup->file = NULL;
3951                fdp = (u32 *)(t->buffer->data + fixup->offset);
3952                /*
3953                 * This store can cause problems for CPUs with a
3954                 * VIVT cache (e.g. ARMv5) since the cache cannot
3955                 * detect virtual aliases to the same physical cacheline.
3956                 * To support VIVT, this address and the user-space VA
3957                 * would both need to be flushed. Since this kernel
3958                 * VA is not constructed via page_to_virt(), we can't
3959                 * use flush_dcache_page() on it, so we'd have to use
3960                 * an internal function. If devices with VIVT ever
3961                 * need to run Android, we'll either need to go back
3962                 * to patching the translated fd from the sender side
3963                 * (using the non-standard kernel functions), or rework
3964                 * how the kernel uses the buffer to use page_to_virt()
3965                 * addresses instead of allocating in our own vm area.
3966                 *
3967                 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3968                 */
3969                *fdp = fd;
3970        }
3971        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3972                if (fixup->file) {
3973                        fput(fixup->file);
3974                } else if (ret) {
3975                        u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3976
3977                        binder_deferred_fd_close(*fdp);
3978                }
3979                list_del(&fixup->fixup_entry);
3980                kfree(fixup);
3981        }
3982
3983        return ret;
3984}
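/*
 * The two-pass structure above keeps error handling simple: the first
 * loop stops at the first allocation failure, and the cleanup loop can
 * tell how far translation got from fixup->file alone (still set: the
 * fd was never installed, so fput(); already NULL while ret != 0: the
 * fd was installed, so defer a close of the value patched into the
 * buffer).
 */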
3985
3986static int binder_thread_read(struct binder_proc *proc,
3987                              struct binder_thread *thread,
3988                              binder_uintptr_t binder_buffer, size_t size,
3989                              binder_size_t *consumed, int non_block)
3990{
3991        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3992        void __user *ptr = buffer + *consumed;
3993        void __user *end = buffer + size;
3994
3995        int ret = 0;
3996        int wait_for_proc_work;
3997
3998        if (*consumed == 0) {
3999                if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4000                        return -EFAULT;
4001                ptr += sizeof(uint32_t);
4002        }
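        /*
         * The leading BR_NOOP guarantees that a successful read always
         * starts with a well-formed command word; user space treats it
         * as padding and skips it, which keeps the consumer loop simple.
         */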
4003
4004retry:
4005        binder_inner_proc_lock(proc);
4006        wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4007        binder_inner_proc_unlock(proc);
4008
4009        thread->looper |= BINDER_LOOPER_STATE_WAITING;
4010
4011        trace_binder_wait_for_work(wait_for_proc_work,
4012                                   !!thread->transaction_stack,
4013                                   !binder_worklist_empty(proc, &thread->todo));
4014        if (wait_for_proc_work) {
4015                if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4016                                        BINDER_LOOPER_STATE_ENTERED))) {
4017                        binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4018                                proc->pid, thread->pid, thread->looper);
4019                        wait_event_interruptible(binder_user_error_wait,
4020                                                 binder_stop_on_user_error < 2);
4021                }
4022                binder_set_nice(proc->default_priority);
4023        }
4024
4025        if (non_block) {
4026                if (!binder_has_work(thread, wait_for_proc_work))
4027                        ret = -EAGAIN;
4028        } else {
4029                ret = binder_wait_for_work(thread, wait_for_proc_work);
4030        }
4031
4032        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4033
4034        if (ret)
4035                return ret;
4036
4037        while (1) {
4038                uint32_t cmd;
4039                struct binder_transaction_data tr;
4040                struct binder_work *w = NULL;
4041                struct list_head *list = NULL;
4042                struct binder_transaction *t = NULL;
4043                struct binder_thread *t_from;
4044
4045                binder_inner_proc_lock(proc);
4046                if (!binder_worklist_empty_ilocked(&thread->todo))
4047                        list = &thread->todo;
4048                else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4049                           wait_for_proc_work)
4050                        list = &proc->todo;
4051                else {
4052                        binder_inner_proc_unlock(proc);
4053
4054                        /* nothing added beyond the initial 4-byte BR_NOOP */
4055                        if (ptr - buffer == 4 && !thread->looper_need_return)
4056                                goto retry;
4057                        break;
4058                }
4059
4060                if (end - ptr < sizeof(tr) + 4) {
4061                        binder_inner_proc_unlock(proc);
4062                        break;
4063                }
4064                w = binder_dequeue_work_head_ilocked(list);
4065                if (binder_worklist_empty_ilocked(&thread->todo))
4066                        thread->process_todo = false;
4067
4068                switch (w->type) {
4069                case BINDER_WORK_TRANSACTION: {
4070                        binder_inner_proc_unlock(proc);
4071                        t = container_of(w, struct binder_transaction, work);
4072                } break;
4073                case BINDER_WORK_RETURN_ERROR: {
4074                        struct binder_error *e = container_of(
4075                                        w, struct binder_error, work);
4076
4077                        WARN_ON(e->cmd == BR_OK);
4078                        binder_inner_proc_unlock(proc);
4079                        if (put_user(e->cmd, (uint32_t __user *)ptr))
4080                                return -EFAULT;
4081                        cmd = e->cmd;
4082                        e->cmd = BR_OK;
4083                        ptr += sizeof(uint32_t);
4084
4085                        binder_stat_br(proc, thread, cmd);
4086                } break;
4087                case BINDER_WORK_TRANSACTION_COMPLETE: {
4088                        binder_inner_proc_unlock(proc);
4089                        cmd = BR_TRANSACTION_COMPLETE;
4090                        if (put_user(cmd, (uint32_t __user *)ptr))
4091                                return -EFAULT;
4092                        ptr += sizeof(uint32_t);
4093
4094                        binder_stat_br(proc, thread, cmd);
4095                        binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4096                                     "%d:%d BR_TRANSACTION_COMPLETE\n",
4097                                     proc->pid, thread->pid);
4098                        kfree(w);
4099                        binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4100                } break;
4101                case BINDER_WORK_NODE: {
4102                        struct binder_node *node = container_of(w, struct binder_node, work);
4103                        int strong, weak;
4104                        binder_uintptr_t node_ptr = node->ptr;
4105                        binder_uintptr_t node_cookie = node->cookie;
4106                        int node_debug_id = node->debug_id;
4107                        int has_weak_ref;
4108                        int has_strong_ref;
4109                        void __user *orig_ptr = ptr;
4110
4111                        BUG_ON(proc != node->proc);
4112                        strong = node->internal_strong_refs ||
4113                                        node->local_strong_refs;
4114                        weak = !hlist_empty(&node->refs) ||
4115                                        node->local_weak_refs ||
4116                                        node->tmp_refs || strong;
4117                        has_strong_ref = node->has_strong_ref;
4118                        has_weak_ref = node->has_weak_ref;
4119
4120                        if (weak && !has_weak_ref) {
4121                                node->has_weak_ref = 1;
4122                                node->pending_weak_ref = 1;
4123                                node->local_weak_refs++;
4124                        }
4125                        if (strong && !has_strong_ref) {
4126                                node->has_strong_ref = 1;
4127                                node->pending_strong_ref = 1;
4128                                node->local_strong_refs++;
4129                        }
4130                        if (!strong && has_strong_ref)
4131                                node->has_strong_ref = 0;
4132                        if (!weak && has_weak_ref)
4133                                node->has_weak_ref = 0;
4134                        if (!weak && !strong) {
4135                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4136                                             "%d:%d node %d u%016llx c%016llx deleted\n",
4137                                             proc->pid, thread->pid,
4138                                             node_debug_id,
4139                                             (u64)node_ptr,
4140                                             (u64)node_cookie);
4141                                rb_erase(&node->rb_node, &proc->nodes);
4142                                binder_inner_proc_unlock(proc);
4143                                binder_node_lock(node);
4144                                /*
4145                                 * Acquire the node lock before freeing the
4146                                 * node to serialize with other threads that
4147                                 * may have been holding the node lock while
4148                                 * decrementing this node (avoids race where
4149                                 * this thread frees while the other thread
4150                                 * is unlocking the node after the final
4151                                 * decrement)
4152                                 */
4153                                binder_node_unlock(node);
4154                                binder_free_node(node);
4155                        } else
4156                                binder_inner_proc_unlock(proc);
4157
4158                        if (weak && !has_weak_ref)
4159                                ret = binder_put_node_cmd(
4160                                                proc, thread, &ptr, node_ptr,
4161                                                node_cookie, node_debug_id,
4162                                                BR_INCREFS, "BR_INCREFS");
4163                        if (!ret && strong && !has_strong_ref)
4164                                ret = binder_put_node_cmd(
4165                                                proc, thread, &ptr, node_ptr,
4166                                                node_cookie, node_debug_id,
4167                                                BR_ACQUIRE, "BR_ACQUIRE");
4168                        if (!ret && !strong && has_strong_ref)
4169                                ret = binder_put_node_cmd(
4170                                                proc, thread, &ptr, node_ptr,
4171                                                node_cookie, node_debug_id,
4172                                                BR_RELEASE, "BR_RELEASE");
4173                        if (!ret && !weak && has_weak_ref)
4174                                ret = binder_put_node_cmd(
4175                                                proc, thread, &ptr, node_ptr,
4176                                                node_cookie, node_debug_id,
4177                                                BR_DECREFS, "BR_DECREFS");
4178                        if (orig_ptr == ptr)
4179                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4180                                             "%d:%d node %d u%016llx c%016llx state unchanged\n",
4181                                             proc->pid, thread->pid,
4182                                             node_debug_id,
4183                                             (u64)node_ptr,
4184                                             (u64)node_cookie);
4185                        if (ret)
4186                                return ret;
4187                } break;
4188                case BINDER_WORK_DEAD_BINDER:
4189                case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4190                case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4191                        struct binder_ref_death *death;
4192                        uint32_t cmd;
4193                        binder_uintptr_t cookie;
4194
4195                        death = container_of(w, struct binder_ref_death, work);
4196                        if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4197                                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4198                        else
4199                                cmd = BR_DEAD_BINDER;
4200                        cookie = death->cookie;
4201
4202                        binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4203                                     "%d:%d %s %016llx\n",
4204                                      proc->pid, thread->pid,
4205                                      cmd == BR_DEAD_BINDER ?
4206                                      "BR_DEAD_BINDER" :
4207                                      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4208                                      (u64)cookie);
4209                        if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4210                                binder_inner_proc_unlock(proc);
4211                                kfree(death);
4212                                binder_stats_deleted(BINDER_STAT_DEATH);
4213                        } else {
4214                                binder_enqueue_work_ilocked(
4215                                                w, &proc->delivered_death);
4216                                binder_inner_proc_unlock(proc);
4217                        }
4218                        if (put_user(cmd, (uint32_t __user *)ptr))
4219                                return -EFAULT;
4220                        ptr += sizeof(uint32_t);
4221                        if (put_user(cookie,
4222                                     (binder_uintptr_t __user *)ptr))
4223                                return -EFAULT;
4224                        ptr += sizeof(binder_uintptr_t);
4225                        binder_stat_br(proc, thread, cmd);
4226                        if (cmd == BR_DEAD_BINDER)
4227                                goto done; /* DEAD_BINDER notifications can cause transactions */
4228                } break;
4229                default:
4230                        binder_inner_proc_unlock(proc);
4231                        pr_err("%d:%d: bad work type %d\n",
4232                               proc->pid, thread->pid, w->type);
4233                        break;
4234                }
4235
4236                if (!t)
4237                        continue;
4238
4239                BUG_ON(t->buffer == NULL);
4240                if (t->buffer->target_node) {
4241                        struct binder_node *target_node = t->buffer->target_node;
4242
4243                        tr.target.ptr = target_node->ptr;
4244                        tr.cookie = target_node->cookie;
4245                        t->saved_priority = task_nice(current);
4246                        if (t->priority < target_node->min_priority &&
4247                            !(t->flags & TF_ONE_WAY))
4248                                binder_set_nice(t->priority);
4249                        else if (!(t->flags & TF_ONE_WAY) ||
4250                                 t->saved_priority > target_node->min_priority)
4251                                binder_set_nice(target_node->min_priority);
4252                        cmd = BR_TRANSACTION;
4253                } else {
4254                        tr.target.ptr = 0;
4255                        tr.cookie = 0;
4256                        cmd = BR_REPLY;
4257                }
4258                tr.code = t->code;
4259                tr.flags = t->flags;
4260                tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4261
4262                t_from = binder_get_txn_from(t);
4263                if (t_from) {
4264                        struct task_struct *sender = t_from->proc->tsk;
4265
4266                        tr.sender_pid = task_tgid_nr_ns(sender,
4267                                                        task_active_pid_ns(current));
4268                } else {
4269                        tr.sender_pid = 0;
4270                }
4271
4272                ret = binder_apply_fd_fixups(t);
4273                if (ret) {
4274                        struct binder_buffer *buffer = t->buffer;
4275                        bool oneway = !!(t->flags & TF_ONE_WAY);
4276                        int tid = t->debug_id;
4277
4278                        if (t_from)
4279                                binder_thread_dec_tmpref(t_from);
4280                        buffer->transaction = NULL;
4281                        binder_cleanup_transaction(t, "fd fixups failed",
4282                                                   BR_FAILED_REPLY);
4283                        binder_free_buf(proc, buffer);
4284                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4285                                     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4286                                     proc->pid, thread->pid,
4287                                     oneway ? "async " :
4288                                        (cmd == BR_REPLY ? "reply " : ""),
4289                                     tid, BR_FAILED_REPLY, ret, __LINE__);
4290                        if (cmd == BR_REPLY) {
4291                                cmd = BR_FAILED_REPLY;
4292                                if (put_user(cmd, (uint32_t __user *)ptr))
4293                                        return -EFAULT;
4294                                ptr += sizeof(uint32_t);
4295                                binder_stat_br(proc, thread, cmd);
4296                                break;
4297                        }
4298                        continue;
4299                }
4300                tr.data_size = t->buffer->data_size;
4301                tr.offsets_size = t->buffer->offsets_size;
4302                tr.data.ptr.buffer = (binder_uintptr_t)
4303                        ((uintptr_t)t->buffer->data +
4304                        binder_alloc_get_user_buffer_offset(&proc->alloc));
4305                tr.data.ptr.offsets = tr.data.ptr.buffer +
4306                                        ALIGN(t->buffer->data_size,
4307                                            sizeof(void *));
4308
4309                if (put_user(cmd, (uint32_t __user *)ptr)) {
4310                        if (t_from)
4311                                binder_thread_dec_tmpref(t_from);
4312
4313                        binder_cleanup_transaction(t, "put_user failed",
4314                                                   BR_FAILED_REPLY);
4315
4316                        return -EFAULT;
4317                }
4318                ptr += sizeof(uint32_t);
4319                if (copy_to_user(ptr, &tr, sizeof(tr))) {
4320                        if (t_from)
4321                                binder_thread_dec_tmpref(t_from);
4322
4323                        binder_cleanup_transaction(t, "copy_to_user failed",
4324                                                   BR_FAILED_REPLY);
4325
4326                        return -EFAULT;
4327                }
4328                ptr += sizeof(tr);
4329
4330                trace_binder_transaction_received(t);
4331                binder_stat_br(proc, thread, cmd);
4332                binder_debug(BINDER_DEBUG_TRANSACTION,
4333                             "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4334                             proc->pid, thread->pid,
4335                             (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4336                             "BR_REPLY",
4337                             t->debug_id, t_from ? t_from->proc->pid : 0,
4338                             t_from ? t_from->pid : 0, cmd,
4339                             t->buffer->data_size, t->buffer->offsets_size,
4340                             (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4341
4342                if (t_from)
4343                        binder_thread_dec_tmpref(t_from);
4344                t->buffer->allow_user_free = 1;
4345                if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4346                        binder_inner_proc_lock(thread->proc);
4347                        t->to_parent = thread->transaction_stack;
4348                        t->to_thread = thread;
4349                        thread->transaction_stack = t;
4350                        binder_inner_proc_unlock(thread->proc);
4351                } else {
4352                        binder_free_transaction(t);
4353                }
4354                break;
4355        }
4356
4357done:
4358
4359        *consumed = ptr - buffer;
4360        binder_inner_proc_lock(proc);
4361        if (proc->requested_threads == 0 &&
4362            list_empty(&thread->proc->waiting_threads) &&
4363            proc->requested_threads_started < proc->max_threads &&
4364            (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4365             BINDER_LOOPER_STATE_ENTERED))
4366             /* user space fails to spawn a new thread if this is left out */) {
4367                proc->requested_threads++;
4368                binder_inner_proc_unlock(proc);
4369                binder_debug(BINDER_DEBUG_THREADS,
4370                             "%d:%d BR_SPAWN_LOOPER\n",
4371                             proc->pid, thread->pid);
4372                if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4373                        return -EFAULT;
4374                binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4375        } else
4376                binder_inner_proc_unlock(proc);
4377        return 0;
4378}
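/*
 * A rough sketch of the consumer side of binder_thread_read(), as a
 * user-space looper might implement it (names are illustrative and
 * error handling is omitted):
 *
 *      void *ptr = (void *)(uintptr_t)bwr.read_buffer;
 *      void *end = ptr + bwr.read_consumed;
 *      while (ptr < end) {
 *              uint32_t cmd = *(uint32_t *)ptr;
 *              ptr += sizeof(uint32_t);
 *              switch (cmd) {
 *              case BR_NOOP:
 *                      break;
 *              case BR_TRANSACTION:
 *              case BR_REPLY:
 *                      // a struct binder_transaction_data follows
 *                      ptr += sizeof(struct binder_transaction_data);
 *                      break;
 *              // ... remaining BR_* commands ...
 *              }
 *      }
 */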
4379
4380static void binder_release_work(struct binder_proc *proc,
4381                                struct list_head *list)
4382{
4383        struct binder_work *w;
4384
4385        while (1) {
4386                w = binder_dequeue_work_head(proc, list);
4387                if (!w)
4388                        return;
4389
4390                switch (w->type) {
4391                case BINDER_WORK_TRANSACTION: {
4392                        struct binder_transaction *t;
4393
4394                        t = container_of(w, struct binder_transaction, work);
4395
4396                        binder_cleanup_transaction(t, "process died.",
4397                                                   BR_DEAD_REPLY);
4398                } break;
4399                case BINDER_WORK_RETURN_ERROR: {
4400                        struct binder_error *e = container_of(
4401                                        w, struct binder_error, work);
4402
4403                        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4404                                "undelivered TRANSACTION_ERROR: %u\n",
4405                                e->cmd);
4406                } break;
4407                case BINDER_WORK_TRANSACTION_COMPLETE: {
4408                        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4409                                "undelivered TRANSACTION_COMPLETE\n");
4410                        kfree(w);
4411                        binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4412                } break;
4413                case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4414                case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4415                        struct binder_ref_death *death;
4416
4417                        death = container_of(w, struct binder_ref_death, work);
4418                        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4419                                "undelivered death notification, %016llx\n",
4420                                (u64)death->cookie);
4421                        kfree(death);
4422                        binder_stats_deleted(BINDER_STAT_DEATH);
4423                } break;
4424                default:
4425                        pr_err("unexpected work type, %d, not freed\n",
4426                               w->type);
4427                        break;
4428                }
4429        }
4431}
4432
4433static struct binder_thread *binder_get_thread_ilocked(
4434                struct binder_proc *proc, struct binder_thread *new_thread)
4435{
4436        struct binder_thread *thread = NULL;
4437        struct rb_node *parent = NULL;
4438        struct rb_node **p = &proc->threads.rb_node;
4439
4440        while (*p) {
4441                parent = *p;
4442                thread = rb_entry(parent, struct binder_thread, rb_node);
4443
4444                if (current->pid < thread->pid)
4445                        p = &(*p)->rb_left;
4446                else if (current->pid > thread->pid)
4447                        p = &(*p)->rb_right;
4448                else
4449                        return thread;
4450        }
4451        if (!new_thread)
4452                return NULL;
4453        thread = new_thread;
4454        binder_stats_created(BINDER_STAT_THREAD);
4455        thread->proc = proc;
4456        thread->pid = current->pid;
4457        atomic_set(&thread->tmp_ref, 0);
4458        init_waitqueue_head(&thread->wait);
4459        INIT_LIST_HEAD(&thread->todo);
4460        rb_link_node(&thread->rb_node, parent, p);
4461        rb_insert_color(&thread->rb_node, &proc->threads);
4462        thread->looper_need_return = true;
4463        thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4464        thread->return_error.cmd = BR_OK;
4465        thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4466        thread->reply_error.cmd = BR_OK;
4467        INIT_LIST_HEAD(&thread->waiting_thread_node);
4468        return thread;
4469}
4470
4471static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4472{
4473        struct binder_thread *thread;
4474        struct binder_thread *new_thread;
4475
4476        binder_inner_proc_lock(proc);
4477        thread = binder_get_thread_ilocked(proc, NULL);
4478        binder_inner_proc_unlock(proc);
4479        if (!thread) {
4480                new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4481                if (new_thread == NULL)
4482                        return NULL;
4483                binder_inner_proc_lock(proc);
4484                thread = binder_get_thread_ilocked(proc, new_thread);
4485                binder_inner_proc_unlock(proc);
4486                if (thread != new_thread)
4487                        kfree(new_thread);
4488        }
4489        return thread;
4490}
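/*
 * binder_get_thread() allocates optimistically: the kzalloc() happens
 * with no spinlock held (GFP_KERNEL may sleep), the lookup is then
 * retried under the inner lock, and the fresh allocation is freed if an
 * entry for this pid already made it into the tree.
 */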
4491
4492static void binder_free_proc(struct binder_proc *proc)
4493{
4494        BUG_ON(!list_empty(&proc->todo));
4495        BUG_ON(!list_empty(&proc->delivered_death));
4496        binder_alloc_deferred_release(&proc->alloc);
4497        put_task_struct(proc->tsk);
4498        binder_stats_deleted(BINDER_STAT_PROC);
4499        kfree(proc);
4500}
4501
4502static void binder_free_thread(struct binder_thread *thread)
4503{
4504        BUG_ON(!list_empty(&thread->todo));
4505        binder_stats_deleted(BINDER_STAT_THREAD);
4506        binder_proc_dec_tmpref(thread->proc);
4507        kfree(thread);
4508}
4509
4510static int binder_thread_release(struct binder_proc *proc,
4511                                 struct binder_thread *thread)
4512{
4513        struct binder_transaction *t;
4514        struct binder_transaction *send_reply = NULL;
4515        int active_transactions = 0;
4516        struct binder_transaction *last_t = NULL;
4517
4518        binder_inner_proc_lock(thread->proc);
4519        /*
4520         * take a ref on the proc so it survives
4521         * after we remove this thread from proc->threads.
4522         * The corresponding dec is when we actually
4523         * free the thread in binder_free_thread()
4524         */
4525        proc->tmp_ref++;
4526        /*
4527         * take a ref on this thread to ensure it
4528         * survives while we are releasing it
4529         */
4530        atomic_inc(&thread->tmp_ref);
4531        rb_erase(&thread->rb_node, &proc->threads);
4532        t = thread->transaction_stack;
4533        if (t) {
4534                spin_lock(&t->lock);
4535                if (t->to_thread == thread)
4536                        send_reply = t;
4537        } else {
4538                __acquire(&t->lock);
4539        }
4540        thread->is_dead = true;
4541
4542        while (t) {
4543                last_t = t;
4544                active_transactions++;
4545                binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4546                             "release %d:%d transaction %d %s, still active\n",
4547                              proc->pid, thread->pid,
4548                             t->debug_id,
4549                             (t->to_thread == thread) ? "in" : "out");
4550
4551                if (t->to_thread == thread) {
4552                        t->to_proc = NULL;
4553                        t->to_thread = NULL;
4554                        if (t->buffer) {
4555                                t->buffer->transaction = NULL;
4556                                t->buffer = NULL;
4557                        }
4558                        t = t->to_parent;
4559                } else if (t->from == thread) {
4560                        t->from = NULL;
4561                        t = t->from_parent;
4562                } else
4563                        BUG();
4564                spin_unlock(&last_t->lock);
4565                if (t)
4566                        spin_lock(&t->lock);
4567                else
4568                        __acquire(&t->lock);
4569        }
4570        /* annotation for sparse, lock not acquired in last iteration above */
4571        __release(&t->lock);
4572
4573        /*
4574         * If this thread used poll, make sure we remove the waitqueue
4575         * from any epoll data structures holding it with POLLFREE.
4576         * waitqueue_active() is safe to use here because we're holding
4577         * the inner lock.
4578         */
4579        if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4580            waitqueue_active(&thread->wait)) {
4581                wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4582        }
4583
4584        binder_inner_proc_unlock(thread->proc);
4585
4586        /*
4587         * This is needed to avoid races between wake_up_poll() above
4588         * and ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4589         * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4590         * lock, so we can be sure it's done after calling synchronize_rcu().
4591         */
4592        if (thread->looper & BINDER_LOOPER_STATE_POLL)
4593                synchronize_rcu();
4594
4595        if (send_reply)
4596                binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4597        binder_release_work(proc, &thread->todo);
4598        binder_thread_dec_tmpref(thread);
4599        return active_transactions;
4600}
4601
4602static __poll_t binder_poll(struct file *filp,
4603                                struct poll_table_struct *wait)
4604{
4605        struct binder_proc *proc = filp->private_data;
4606        struct binder_thread *thread = NULL;
4607        bool wait_for_proc_work;
4608
4609        thread = binder_get_thread(proc);
4610        if (!thread)
4611                return EPOLLERR;
4612
4613        binder_inner_proc_lock(thread->proc);
4614        thread->looper |= BINDER_LOOPER_STATE_POLL;
4615        wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4616
4617        binder_inner_proc_unlock(thread->proc);
4618
4619        poll_wait(filp, &thread->wait, wait);
4620
4621        if (binder_has_work(thread, wait_for_proc_work))
4622                return EPOLLIN;
4623
4624        return 0;
4625}
4626
4627static int binder_ioctl_write_read(struct file *filp,
4628                                unsigned int cmd, unsigned long arg,
4629                                struct binder_thread *thread)
4630{
4631        int ret = 0;
4632        struct binder_proc *proc = filp->private_data;
4633        unsigned int size = _IOC_SIZE(cmd);
4634        void __user *ubuf = (void __user *)arg;
4635        struct binder_write_read bwr;
4636
4637        if (size != sizeof(struct binder_write_read)) {
4638                ret = -EINVAL;
4639                goto out;
4640        }
4641        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4642                ret = -EFAULT;
4643                goto out;
4644        }
4645        binder_debug(BINDER_DEBUG_READ_WRITE,
4646                     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4647                     proc->pid, thread->pid,
4648                     (u64)bwr.write_size, (u64)bwr.write_buffer,
4649                     (u64)bwr.read_size, (u64)bwr.read_buffer);
4650
4651        if (bwr.write_size > 0) {
4652                ret = binder_thread_write(proc, thread,
4653                                          bwr.write_buffer,
4654                                          bwr.write_size,
4655                                          &bwr.write_consumed);
4656                trace_binder_write_done(ret);
4657                if (ret < 0) {
4658                        bwr.read_consumed = 0;
4659                        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4660                                ret = -EFAULT;
4661                        goto out;
4662                }
4663        }
4664        if (bwr.read_size > 0) {
4665                ret = binder_thread_read(proc, thread, bwr.read_buffer,
4666                                         bwr.read_size,
4667                                         &bwr.read_consumed,
4668                                         filp->f_flags & O_NONBLOCK);
4669                trace_binder_read_done(ret);
4670                binder_inner_proc_lock(proc);
4671                if (!binder_worklist_empty_ilocked(&proc->todo))
4672                        binder_wakeup_proc_ilocked(proc);
4673                binder_inner_proc_unlock(proc);
4674                if (ret < 0) {
4675                        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4676                                ret = -EFAULT;
4677                        goto out;
4678                }
4679        }
4680        binder_debug(BINDER_DEBUG_READ_WRITE,
4681                     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4682                     proc->pid, thread->pid,
4683                     (u64)bwr.write_consumed, (u64)bwr.write_size,
4684                     (u64)bwr.read_consumed, (u64)bwr.read_size);
4685        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4686                ret = -EFAULT;
4687                goto out;
4688        }
4689out:
4690        return ret;
4691}
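/*
 * A typical user-space invocation of the path above, for illustration
 * only (error handling omitted):
 *
 *      struct binder_write_read bwr = {
 *              .write_buffer = (binder_uintptr_t)(uintptr_t)write_buf,
 *              .write_size   = write_len,
 *              .read_buffer  = (binder_uintptr_t)(uintptr_t)read_buf,
 *              .read_size    = sizeof(read_buf),
 *      };
 *      ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *      // bwr.write_consumed and bwr.read_consumed report progress
 */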
4692
4693static int binder_ioctl_set_ctx_mgr(struct file *filp)
4694{
4695        int ret = 0;
4696        struct binder_proc *proc = filp->private_data;
4697        struct binder_context *context = proc->context;
4698        struct binder_node *new_node;
4699        kuid_t curr_euid = current_euid();
4700
4701        mutex_lock(&context->context_mgr_node_lock);
4702        if (context->binder_context_mgr_node) {
4703                pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4704                ret = -EBUSY;
4705                goto out;
4706        }
4707        ret = security_binder_set_context_mgr(proc->tsk);
4708        if (ret < 0)
4709                goto out;
4710        if (uid_valid(context->binder_context_mgr_uid)) {
4711                if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4712                        pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4713                               from_kuid(&init_user_ns, curr_euid),
4714                               from_kuid(&init_user_ns,
4715                                         context->binder_context_mgr_uid));
4716                        ret = -EPERM;
4717                        goto out;
4718                }
4719        } else {
4720                context->binder_context_mgr_uid = curr_euid;
4721        }
4722        new_node = binder_new_node(proc, NULL);
4723        if (!new_node) {
4724                ret = -ENOMEM;
4725                goto out;
4726        }
4727        binder_node_lock(new_node);
4728        new_node->local_weak_refs++;
4729        new_node->local_strong_refs++;
4730        new_node->has_strong_ref = 1;
4731        new_node->has_weak_ref = 1;
4732        context->binder_context_mgr_node = new_node;
4733        binder_node_unlock(new_node);
4734        binder_put_node(new_node);
4735out:
4736        mutex_unlock(&context->context_mgr_node_lock);
4737        return ret;
4738}
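/*
 * There is at most one context manager per binder context: the object
 * other processes reach through handle 0. A service manager would
 * typically claim the role right after opening the device, e.g.
 * ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) (illustrative).
 */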
4739
4740static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4741                struct binder_node_info_for_ref *info)
4742{
4743        struct binder_node *node;
4744        struct binder_context *context = proc->context;
4745        __u32 handle = info->handle;
4746
4747        if (info->strong_count || info->weak_count || info->reserved1 ||
4748            info->reserved2 || info->reserved3) {
4749                binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4750                                  proc->pid);
4751                return -EINVAL;
4752        }
4753
4754        /* This ioctl may only be used by the context manager */
4755        mutex_lock(&context->context_mgr_node_lock);
4756        if (!context->binder_context_mgr_node ||
4757                context->binder_context_mgr_node->proc != proc) {
4758                mutex_unlock(&context->context_mgr_node_lock);
4759                return -EPERM;
4760        }
4761        mutex_unlock(&context->context_mgr_node_lock);
4762
4763        node = binder_get_node_from_ref(proc, handle, true, NULL);
4764        if (!node)
4765                return -EINVAL;
4766
4767        info->strong_count = node->local_strong_refs +
4768                node->internal_strong_refs;
4769        info->weak_count = node->local_weak_refs;
4770
4771        binder_put_node(node);
4772
4773        return 0;
4774}
4775
4776static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4777                                struct binder_node_debug_info *info)
4778{
4779        struct rb_node *n;
4780        binder_uintptr_t ptr = info->ptr;
4781
4782        memset(info, 0, sizeof(*info));
4783
4784        binder_inner_proc_lock(proc);
4785        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4786                struct binder_node *node = rb_entry(n, struct binder_node,
4787                                                    rb_node);
4788                if (node->ptr > ptr) {
4789                        info->ptr = node->ptr;
4790                        info->cookie = node->cookie;
4791                        info->has_strong_ref = node->has_strong_ref;
4792                        info->has_weak_ref = node->has_weak_ref;
4793                        break;
4794                }
4795        }
4796        binder_inner_proc_unlock(proc);
4797
4798        return 0;
4799}
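/*
 * Since the lookup returns the first node whose ptr is strictly greater
 * than the one passed in, user space can enumerate all nodes by feeding
 * the returned ptr back in until it reads back 0 (sketch, names
 * illustrative):
 *
 *      struct binder_node_debug_info info = { .ptr = 0 };
 *      do {
 *              ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info);
 *      } while (info.ptr != 0);
 */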
4800
4801static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4802{
4803        int ret;
4804        struct binder_proc *proc = filp->private_data;
4805        struct binder_thread *thread;
4806        unsigned int size = _IOC_SIZE(cmd);
4807        void __user *ubuf = (void __user *)arg;
4808
4809        /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4810                        proc->pid, current->pid, cmd, arg);*/
4811
4812        binder_selftest_alloc(&proc->alloc);
4813
4814        trace_binder_ioctl(cmd, arg);
4815
4816        ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4817        if (ret)
4818                goto err_unlocked;
4819
4820        thread = binder_get_thread(proc);
4821        if (thread == NULL) {
4822                ret = -ENOMEM;
4823                goto err;
4824        }
4825
4826        switch (cmd) {
4827        case BINDER_WRITE_READ:
4828                ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4829                if (ret)
4830                        goto err;
4831                break;
4832        case BINDER_SET_MAX_THREADS: {
4833                int max_threads;
4834
4835                if (copy_from_user(&max_threads, ubuf,
4836                                   sizeof(max_threads))) {
4837                        ret = -EINVAL;
4838                        goto err;
4839                }
4840                binder_inner_proc_lock(proc);
4841                proc->max_threads = max_threads;
4842                binder_inner_proc_unlock(proc);
4843                break;
4844        }
4845        case BINDER_SET_CONTEXT_MGR:
4846                ret = binder_ioctl_set_ctx_mgr(filp);
4847                if (ret)
4848                        goto err;
4849                break;
4850        case BINDER_THREAD_EXIT:
4851                binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4852                             proc->pid, thread->pid);
4853                binder_thread_release(proc, thread);
4854                thread = NULL;
4855                break;
4856        case BINDER_VERSION: {
4857                struct binder_version __user *ver = ubuf;
4858
4859                if (size != sizeof(struct binder_version)) {
4860                        ret = -EINVAL;
4861                        goto err;
4862                }
4863                if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4864                             &ver->protocol_version)) {
4865                        ret = -EINVAL;
4866                        goto err;
4867                }
4868                break;
4869        }
4870        case BINDER_GET_NODE_INFO_FOR_REF: {
4871                struct binder_node_info_for_ref info;
4872
4873                if (copy_from_user(&info, ubuf, sizeof(info))) {
4874                        ret = -EFAULT;
4875                        goto err;
4876                }
4877
4878                ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4879                if (ret < 0)
4880                        goto err;
4881
4882                if (copy_to_user(ubuf, &info, sizeof(info))) {
4883                        ret = -EFAULT;
4884                        goto err;
4885                }
4886
4887                break;
4888        }
4889        case BINDER_GET_NODE_DEBUG_INFO: {
4890                struct binder_node_debug_info info;
4891
4892                if (copy_from_user(&info, ubuf, sizeof(info))) {
4893                        ret = -EFAULT;
4894                        goto err;
4895                }
4896
4897                ret = binder_ioctl_get_node_debug_info(proc, &info);
4898                if (ret < 0)
4899                        goto err;
4900
4901                if (copy_to_user(ubuf, &info, sizeof(info))) {
4902                        ret = -EFAULT;
4903                        goto err;
4904                }
4905                break;
4906        }
4907        default:
4908                ret = -EINVAL;
4909                goto err;
4910        }
4911        ret = 0;
4912err:
4913        if (thread)
4914                thread->looper_need_return = false;
4915        wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4916        if (ret && ret != -ERESTARTSYS)
4917                pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4918err_unlocked:
4919        trace_binder_ioctl_done(ret);
4920        return ret;
4921}
4922
4923static void binder_vma_open(struct vm_area_struct *vma)
4924{
4925        struct binder_proc *proc = vma->vm_private_data;
4926
4927        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4928                     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4929                     proc->pid, vma->vm_start, vma->vm_end,
4930                     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4931                     (unsigned long)pgprot_val(vma->vm_page_prot));
4932}
4933
4934static void binder_vma_close(struct vm_area_struct *vma)
4935{
4936        struct binder_proc *proc = vma->vm_private_data;
4937
4938        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4939                     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4940                     proc->pid, vma->vm_start, vma->vm_end,
4941                     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4942                     (unsigned long)pgprot_val(vma->vm_page_prot));
4943        binder_alloc_vma_close(&proc->alloc);
4944}
4945
4946static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4947{
4948        return VM_FAULT_SIGBUS;
4949}
4950
4951static const struct vm_operations_struct binder_vm_ops = {
4952        .open = binder_vma_open,
4953        .close = binder_vma_close,
4954        .fault = binder_vm_fault,
4955};
4956
4957static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4958{
4959        int ret;
4960        struct binder_proc *proc = filp->private_data;
4961        const char *failure_string;
4962
4963        if (proc->tsk != current->group_leader)
4964                return -EINVAL;
4965
4966        if ((vma->vm_end - vma->vm_start) > SZ_4M)
4967                vma->vm_end = vma->vm_start + SZ_4M;
4968
4969        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4970                     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4971                     __func__, proc->pid, vma->vm_start, vma->vm_end,
4972                     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4973                     (unsigned long)pgprot_val(vma->vm_page_prot));
4974
4975        if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4976                ret = -EPERM;
4977                failure_string = "bad vm_flags";
4978                goto err_bad_arg;
4979        }
4980        vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4981        vma->vm_flags &= ~VM_MAYWRITE;
4982
4983        vma->vm_ops = &binder_vm_ops;
4984        vma->vm_private_data = proc;
4985
4986        ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4987        if (ret)
4988                return ret;
4989        return 0;
4990
4991err_bad_arg:
4992        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4993               proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4994        return ret;
4995}
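/*
 * The mapping is deliberately constrained: at most 4MB, never writable
 * from user space (VM_WRITE is rejected via FORBIDDEN_MMAP_FLAGS and
 * VM_MAYWRITE is cleared so write access cannot be regained through
 * mprotect()), and only one mapping per process is accepted by
 * binder_alloc_mmap_handler().
 */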
4996
4997static int binder_open(struct inode *nodp, struct file *filp)
4998{
4999        struct binder_proc *proc;
5000        struct binder_device *binder_dev;
5001
5002        binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5003                     current->group_leader->pid, current->pid);
5004
5005        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5006        if (proc == NULL)
5007                return -ENOMEM;
5008        spin_lock_init(&proc->inner_lock);
5009        spin_lock_init(&proc->outer_lock);
5010        get_task_struct(current->group_leader);
5011        proc->tsk = current->group_leader;
5012        INIT_LIST_HEAD(&proc->todo);
5013        proc->default_priority = task_nice(current);
5014        /* binderfs stashes devices in i_private */
5015        if (is_binderfs_device(nodp))
5016                binder_dev = nodp->i_private;
5017        else
5018                binder_dev = container_of(filp->private_data,
5019                                          struct binder_device, miscdev);
5020        proc->context = &binder_dev->context;
5021        binder_alloc_init(&proc->alloc);
5022
5023        binder_stats_created(BINDER_STAT_PROC);
5024        proc->pid = current->group_leader->pid;
5025        INIT_LIST_HEAD(&proc->delivered_death);
5026        INIT_LIST_HEAD(&proc->waiting_threads);
5027        filp->private_data = proc;
5028
5029        mutex_lock(&binder_procs_lock);
5030        hlist_add_head(&proc->proc_node, &binder_procs);
5031        mutex_unlock(&binder_procs_lock);
5032
5033        if (binder_debugfs_dir_entry_proc) {
5034                char strbuf[11];
5035
5036                snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5037                /*
5038                 * proc debug entries are shared between contexts, so
5039                 * this will fail if the process tries to open the driver
5040                 * again with a different context. The printing code will
5041                 * print all contexts that a given PID has anyway, so this
5042                 * is not a problem.
5043                 */
5044                proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5045                        binder_debugfs_dir_entry_proc,
5046                        (void *)(unsigned long)proc->pid,
5047                        &proc_fops);
5048        }
5049
5050        return 0;
5051}
5052
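/* flush() only schedules the real work; see binder_deferred_flush() */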
5053static int binder_flush(struct file *filp, fl_owner_t id)
5054{
5055        struct binder_proc *proc = filp->private_data;
5056
5057        binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5058
5059        return 0;
5060}
5061
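/*
 * Force all of the process's binder threads back to userspace: mark each
 * thread with looper_need_return and wake any thread currently waiting
 * for work.
 */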
5062static void binder_deferred_flush(struct binder_proc *proc)
5063{
5064        struct rb_node *n;
5065        int wake_count = 0;
5066
5067        binder_inner_proc_lock(proc);
5068        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5069                struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5070
5071                thread->looper_need_return = true;
5072                if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5073                        wake_up_interruptible(&thread->wait);
5074                        wake_count++;
5075                }
5076        }
5077        binder_inner_proc_unlock(proc);
5078
5079        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5080                     "binder_flush: %d woke %d threads\n", proc->pid,
5081                     wake_count);
5082}
5083
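/*
 * Only the debugfs entry is removed here; the real teardown is deferred
 * to the workqueue, see binder_deferred_release().
 */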
5084static int binder_release(struct inode *nodp, struct file *filp)
5085{
5086        struct binder_proc *proc = filp->private_data;
5087
5088        debugfs_remove(proc->debugfs_entry);
5089        binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5090
5091        return 0;
5092}
5093
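/*
 * binder_node_release() - clean up a node whose owning process is dying
 * @node:	node to release; the caller must hold a temporary ref
 * @refs:	running count of incoming refs, updated and returned
 *
 * If nothing else references the node, it is freed outright. Otherwise it
 * moves to the global binder_dead_nodes list and a BINDER_WORK_DEAD_BINDER
 * item is queued for every ref that requested a death notification.
 */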
5094static int binder_node_release(struct binder_node *node, int refs)
5095{
5096        struct binder_ref *ref;
5097        int death = 0;
5098        struct binder_proc *proc = node->proc;
5099
5100        binder_release_work(proc, &node->async_todo);
5101
5102        binder_node_lock(node);
5103        binder_inner_proc_lock(proc);
5104        binder_dequeue_work_ilocked(&node->work);
5105        /*
5106         * The caller must have taken a temporary ref on the node.
5107         */
5108        BUG_ON(!node->tmp_refs);
5109        if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5110                binder_inner_proc_unlock(proc);
5111                binder_node_unlock(node);
5112                binder_free_node(node);
5113
5114                return refs;
5115        }
5116
5117        node->proc = NULL;
5118        node->local_strong_refs = 0;
5119        node->local_weak_refs = 0;
5120        binder_inner_proc_unlock(proc);
5121
5122        spin_lock(&binder_dead_nodes_lock);
5123        hlist_add_head(&node->dead_node, &binder_dead_nodes);
5124        spin_unlock(&binder_dead_nodes_lock);
5125
5126        hlist_for_each_entry(ref, &node->refs, node_entry) {
5127                refs++;
5128                /*
5129                 * Need the node lock to synchronize
5130                 * with new notification requests and the
5131                 * inner lock to synchronize with queued
5132                 * death notifications.
5133                 */
5134                binder_inner_proc_lock(ref->proc);
5135                if (!ref->death) {
5136                        binder_inner_proc_unlock(ref->proc);
5137                        continue;
5138                }
5139
5140                death++;
5141
5142                BUG_ON(!list_empty(&ref->death->work.entry));
5143                ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5144                binder_enqueue_work_ilocked(&ref->death->work,
5145                                            &ref->proc->todo);
5146                binder_wakeup_proc_ilocked(ref->proc);
5147                binder_inner_proc_unlock(ref->proc);
5148        }
5149
5150        binder_debug(BINDER_DEBUG_DEAD_BINDER,
5151                     "node %d now dead, refs %d, death %d\n",
5152                     node->debug_id, refs, death);
5153        binder_node_unlock(node);
5154        binder_put_node(node);
5155
5156        return refs;
5157}
5158
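/*
 * binder_deferred_release() - tear down a binder_proc after release()
 *
 * Unregisters the proc, clears the context manager if this proc owned it,
 * then releases all threads, nodes, refs and pending work. The proc itself
 * is freed once the last temporary reference is dropped via
 * binder_proc_dec_tmpref().
 */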
5159static void binder_deferred_release(struct binder_proc *proc)
5160{
5161        struct binder_context *context = proc->context;
5162        struct rb_node *n;
5163        int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5164
5165        mutex_lock(&binder_procs_lock);
5166        hlist_del(&proc->proc_node);
5167        mutex_unlock(&binder_procs_lock);
5168
5169        mutex_lock(&context->context_mgr_node_lock);
5170        if (context->binder_context_mgr_node &&
5171            context->binder_context_mgr_node->proc == proc) {
5172                binder_debug(BINDER_DEBUG_DEAD_BINDER,
5173                             "%s: %d context_mgr_node gone\n",
5174                             __func__, proc->pid);
5175                context->binder_context_mgr_node = NULL;
5176        }
5177        mutex_unlock(&context->context_mgr_node_lock);
5178        binder_inner_proc_lock(proc);
5179        /*
5180         * Make sure proc stays alive after we
5181         * remove all the threads.
5182         */
5183        proc->tmp_ref++;
5184
5185        proc->is_dead = true;
5186        threads = 0;
5187        active_transactions = 0;
5188        while ((n = rb_first(&proc->threads))) {
5189                struct binder_thread *thread;
5190
5191                thread = rb_entry(n, struct binder_thread, rb_node);
5192                binder_inner_proc_unlock(proc);
5193                threads++;
5194                active_transactions += binder_thread_release(proc, thread);
5195                binder_inner_proc_lock(proc);
5196        }
5197
5198        nodes = 0;
5199        incoming_refs = 0;
5200        while ((n = rb_first(&proc->nodes))) {
5201                struct binder_node *node;
5202
5203                node = rb_entry(n, struct binder_node, rb_node);
5204                nodes++;
5205                /*
5206                 * take a temporary ref on the node before
5207                 * calling binder_node_release(), which will either
5208                 * kfree() the node or call binder_put_node()
5209                 */
5210                binder_inc_node_tmpref_ilocked(node);
5211                rb_erase(&node->rb_node, &proc->nodes);
5212                binder_inner_proc_unlock(proc);
5213                incoming_refs = binder_node_release(node, incoming_refs);
5214                binder_inner_proc_lock(proc);
5215        }
5216        binder_inner_proc_unlock(proc);
5217
5218        outgoing_refs = 0;
5219        binder_proc_lock(proc);
5220        while ((n = rb_first(&proc->refs_by_desc))) {
5221                struct binder_ref *ref;
5222
5223                ref = rb_entry(n, struct binder_ref, rb_node_desc);
5224                outgoing_refs++;
5225                binder_cleanup_ref_olocked(ref);
5226                binder_proc_unlock(proc);
5227                binder_free_ref(ref);
5228                binder_proc_lock(proc);
5229        }
5230        binder_proc_unlock(proc);
5231
5232        binder_release_work(proc, &proc->todo);
5233        binder_release_work(proc, &proc->delivered_death);
5234
5235        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5236                     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5237                     __func__, proc->pid, threads, nodes, incoming_refs,
5238                     outgoing_refs, active_transactions);
5239
5240        binder_proc_dec_tmpref(proc);
5241}
5242
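/*
 * Workqueue handler: drain binder_deferred_list one binder_proc at a
 * time, processing whatever deferred-work bits were set for it.
 */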
5243static void binder_deferred_func(struct work_struct *work)
5244{
5245        struct binder_proc *proc;
5246
5247        int defer;
5248
5249        do {
5250                mutex_lock(&binder_deferred_lock);
5251                if (!hlist_empty(&binder_deferred_list)) {
5252                        proc = hlist_entry(binder_deferred_list.first,
5253                                        struct binder_proc, deferred_work_node);
5254                        hlist_del_init(&proc->deferred_work_node);
5255                        defer = proc->deferred_work;
5256                        proc->deferred_work = 0;
5257                } else {
5258                        proc = NULL;
5259                        defer = 0;
5260                }
5261                mutex_unlock(&binder_deferred_lock);
5262
5263                if (defer & BINDER_DEFERRED_FLUSH)
5264                        binder_deferred_flush(proc);
5265
5266                if (defer & BINDER_DEFERRED_RELEASE)
5267                        binder_deferred_release(proc); /* frees proc */
5268        } while (proc);
5269}

5270static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5271
5272static void
5273binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5274{
5275        mutex_lock(&binder_deferred_lock);
5276        proc->deferred_work |= defer;
5277        if (hlist_unhashed(&proc->deferred_work_node)) {
5278                hlist_add_head(&proc->deferred_work_node,
5279                                &binder_deferred_list);
5280                schedule_work(&binder_deferred_work);
5281        }
5282        mutex_unlock(&binder_deferred_lock);
5283}
5284
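/*
 * Debugfs helper: print one transaction. Requires proc->inner_lock (the
 * _ilocked suffix); the transaction buffer is only dereferenced when it
 * belongs to @proc, since that is the lock protecting it.
 */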
5285static void print_binder_transaction_ilocked(struct seq_file *m,
5286                                             struct binder_proc *proc,
5287                                             const char *prefix,
5288                                             struct binder_transaction *t)
5289{
5290        struct binder_proc *to_proc;
5291        struct binder_buffer *buffer = t->buffer;
5292
5293        spin_lock(&t->lock);
5294        to_proc = t->to_proc;
5295        seq_printf(m,
5296                   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5297                   prefix, t->debug_id, t,
5298                   t->from ? t->from->proc->pid : 0,
5299                   t->from ? t->from->pid : 0,
5300                   to_proc ? to_proc->pid : 0,
5301                   t->to_thread ? t->to_thread->pid : 0,
5302                   t->code, t->flags, t->priority, t->need_reply);
5303        spin_unlock(&t->lock);
5304
5305        if (proc != to_proc) {
5306                /*
5307                 * Can only safely deref buffer if we are holding the
5308                 * inner lock of the proc the buffer belongs to
5309                 */
5310                seq_puts(m, "\n");
5311                return;
5312        }
5313
5314        if (buffer == NULL) {
5315                seq_puts(m, " buffer free\n");
5316                return;
5317        }
5318        if (buffer->target_node)
5319                seq_printf(m, " node %d", buffer->target_node->debug_id);
5320        seq_printf(m, " size %zd:%zd data %pK\n",
5321                   buffer->data_size, buffer->offsets_size,
5322                   buffer->data);
5323}
5324
5325static void print_binder_work_ilocked(struct seq_file *m,
5326                                     struct binder_proc *proc,
5327                                     const char *prefix,
5328                                     const char *transaction_prefix,
5329                                     struct binder_work *w)
5330{
5331        struct binder_node *node;
5332        struct binder_transaction *t;
5333
5334        switch (w->type) {
5335        case BINDER_WORK_TRANSACTION:
5336                t = container_of(w, struct binder_transaction, work);
5337                print_binder_transaction_ilocked(
5338                                m, proc, transaction_prefix, t);
5339                break;
5340        case BINDER_WORK_RETURN_ERROR: {
5341                struct binder_error *e = container_of(
5342                                w, struct binder_error, work);
5343
5344                seq_printf(m, "%stransaction error: %u\n",
5345                           prefix, e->cmd);
5346        } break;
5347        case BINDER_WORK_TRANSACTION_COMPLETE:
5348                seq_printf(m, "%stransaction complete\n", prefix);
5349                break;
5350        case BINDER_WORK_NODE:
5351                node = container_of(w, struct binder_node, work);
5352                seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5353                           prefix, node->debug_id,
5354                           (u64)node->ptr, (u64)node->cookie);
5355                break;
5356        case BINDER_WORK_DEAD_BINDER:
5357                seq_printf(m, "%shas dead binder\n", prefix);
5358                break;
5359        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5360                seq_printf(m, "%shas cleared dead binder\n", prefix);
5361                break;
5362        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5363                seq_printf(m, "%shas cleared death notification\n", prefix);
5364                break;
5365        default:
5366                seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5367                break;
5368        }
5369}
5370
5371static void print_binder_thread_ilocked(struct seq_file *m,
5372                                        struct binder_thread *thread,
5373                                        int print_always)
5374{
5375        struct binder_transaction *t;
5376        struct binder_work *w;
5377        size_t start_pos = m->count;
5378        size_t header_pos;
5379
5380        seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5381                        thread->pid, thread->looper,
5382                        thread->looper_need_return,
5383                        atomic_read(&thread->tmp_ref));
5384        header_pos = m->count;
5385        t = thread->transaction_stack;
5386        while (t) {
5387                if (t->from == thread) {
5388                        print_binder_transaction_ilocked(m, thread->proc,
5389                                        "    outgoing transaction", t);
5390                        t = t->from_parent;
5391                } else if (t->to_thread == thread) {
5392                        print_binder_transaction_ilocked(m, thread->proc,
5393                                                 "    incoming transaction", t);
5394                        t = t->to_parent;
5395                } else {
5396                        print_binder_transaction_ilocked(m, thread->proc,
5397                                        "    bad transaction", t);
5398                        t = NULL;
5399                }
5400        }
5401        list_for_each_entry(w, &thread->todo, entry) {
5402                print_binder_work_ilocked(m, thread->proc, "    ",
5403                                          "    pending transaction", w);
5404        }
5405        if (!print_always && m->count == header_pos)
5406                m->count = start_pos;
5407}
5408
5409static void print_binder_node_nilocked(struct seq_file *m,
5410                                       struct binder_node *node)
5411{
5412        struct binder_ref *ref;
5413        struct binder_work *w;
5414        int count;
5415
5416        count = 0;
5417        hlist_for_each_entry(ref, &node->refs, node_entry)
5418                count++;
5419
5420        seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5421                   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5422                   node->has_strong_ref, node->has_weak_ref,
5423                   node->local_strong_refs, node->local_weak_refs,
5424                   node->internal_strong_refs, count, node->tmp_refs);
5425        if (count) {
5426                seq_puts(m, " proc");
5427                hlist_for_each_entry(ref, &node->refs, node_entry)
5428                        seq_printf(m, " %d", ref->proc->pid);
5429        }
5430        seq_puts(m, "\n");
5431        if (node->proc) {
5432                list_for_each_entry(w, &node->async_todo, entry)
5433                        print_binder_work_ilocked(m, node->proc, "    ",
5434                                          "    pending async transaction", w);
5435        }
5436}
5437
5438static void print_binder_ref_olocked(struct seq_file *m,
5439                                     struct binder_ref *ref)
5440{
5441        binder_node_lock(ref->node);
5442        seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5443                   ref->data.debug_id, ref->data.desc,
5444                   ref->node->proc ? "" : "dead ",
5445                   ref->node->debug_id, ref->data.strong,
5446                   ref->data.weak, ref->death);
5447        binder_node_unlock(ref->node);
5448}
5449
5450static void print_binder_proc(struct seq_file *m,
5451                              struct binder_proc *proc, int print_all)
5452{
5453        struct binder_work *w;
5454        struct rb_node *n;
5455        size_t start_pos = m->count;
5456        size_t header_pos;
5457        struct binder_node *last_node = NULL;
5458
5459        seq_printf(m, "proc %d\n", proc->pid);
5460        seq_printf(m, "context %s\n", proc->context->name);
5461        header_pos = m->count;
5462
5463        binder_inner_proc_lock(proc);
5464        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5465                print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5466                                                rb_node), print_all);
5467
5468        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5469                struct binder_node *node = rb_entry(n, struct binder_node,
5470                                                    rb_node);
5471                if (!print_all && !node->has_async_transaction)
5472                        continue;
5473
5474                /*
5475                 * take a temporary reference on the node so it
5476                 * survives and isn't removed from the tree
5477                 * while we print it.
5478                 */
5479                binder_inc_node_tmpref_ilocked(node);
5480                /* Need to drop inner lock to take node lock */
5481                binder_inner_proc_unlock(proc);
5482                if (last_node)
5483                        binder_put_node(last_node);
5484                binder_node_inner_lock(node);
5485                print_binder_node_nilocked(m, node);
5486                binder_node_inner_unlock(node);
5487                last_node = node;
5488                binder_inner_proc_lock(proc);
5489        }
5490        binder_inner_proc_unlock(proc);
5491        if (last_node)
5492                binder_put_node(last_node);
5493
5494        if (print_all) {
5495                binder_proc_lock(proc);
5496                for (n = rb_first(&proc->refs_by_desc);
5497                     n != NULL;
5498                     n = rb_next(n))
5499                        print_binder_ref_olocked(m, rb_entry(n,
5500                                                            struct binder_ref,
5501                                                            rb_node_desc));
5502                binder_proc_unlock(proc);
5503        }
5504        binder_alloc_print_allocated(m, &proc->alloc);
5505        binder_inner_proc_lock(proc);
5506        list_for_each_entry(w, &proc->todo, entry)
5507                print_binder_work_ilocked(m, proc, "  ",
5508                                          "  pending transaction", w);
5509        list_for_each_entry(w, &proc->delivered_death, entry) {
5510                seq_puts(m, "  has delivered dead binder\n");
5511                break;
5512        }
5513        binder_inner_proc_unlock(proc);
5514        if (!print_all && m->count == header_pos)
5515                m->count = start_pos;
5516}
5517
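/*
 * Human-readable names for the BR_*/BC_* protocol values, in protocol
 * order; print_binder_stats() BUILD_BUG_ON()s that these tables stay the
 * same size as the corresponding stats arrays.
 */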
5518static const char * const binder_return_strings[] = {
5519        "BR_ERROR",
5520        "BR_OK",
5521        "BR_TRANSACTION",
5522        "BR_REPLY",
5523        "BR_ACQUIRE_RESULT",
5524        "BR_DEAD_REPLY",
5525        "BR_TRANSACTION_COMPLETE",
5526        "BR_INCREFS",
5527        "BR_ACQUIRE",
5528        "BR_RELEASE",
5529        "BR_DECREFS",
5530        "BR_ATTEMPT_ACQUIRE",
5531        "BR_NOOP",
5532        "BR_SPAWN_LOOPER",
5533        "BR_FINISHED",
5534        "BR_DEAD_BINDER",
5535        "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5536        "BR_FAILED_REPLY"
5537};
5538
5539static const char * const binder_command_strings[] = {
5540        "BC_TRANSACTION",
5541        "BC_REPLY",
5542        "BC_ACQUIRE_RESULT",
5543        "BC_FREE_BUFFER",
5544        "BC_INCREFS",
5545        "BC_ACQUIRE",
5546        "BC_RELEASE",
5547        "BC_DECREFS",
5548        "BC_INCREFS_DONE",
5549        "BC_ACQUIRE_DONE",
5550        "BC_ATTEMPT_ACQUIRE",
5551        "BC_REGISTER_LOOPER",
5552        "BC_ENTER_LOOPER",
5553        "BC_EXIT_LOOPER",
5554        "BC_REQUEST_DEATH_NOTIFICATION",
5555        "BC_CLEAR_DEATH_NOTIFICATION",
5556        "BC_DEAD_BINDER_DONE",
5557        "BC_TRANSACTION_SG",
5558        "BC_REPLY_SG",
5559};
5560
5561static const char * const binder_objstat_strings[] = {
5562        "proc",
5563        "thread",
5564        "node",
5565        "ref",
5566        "death",
5567        "transaction",
5568        "transaction_complete"
5569};
5570
5571static void print_binder_stats(struct seq_file *m, const char *prefix,
5572                               struct binder_stats *stats)
5573{
5574        int i;
5575
5576        BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5577                     ARRAY_SIZE(binder_command_strings));
5578        for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5579                int temp = atomic_read(&stats->bc[i]);
5580
5581                if (temp)
5582                        seq_printf(m, "%s%s: %d\n", prefix,
5583                                   binder_command_strings[i], temp);
5584        }
5585
5586        BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5587                     ARRAY_SIZE(binder_return_strings));
5588        for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5589                int temp = atomic_read(&stats->br[i]);
5590
5591                if (temp)
5592                        seq_printf(m, "%s%s: %d\n", prefix,
5593                                   binder_return_strings[i], temp);
5594        }
5595
5596        BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5597                     ARRAY_SIZE(binder_objstat_strings));
5598        BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5599                     ARRAY_SIZE(stats->obj_deleted));
5600        for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5601                int created = atomic_read(&stats->obj_created[i]);
5602                int deleted = atomic_read(&stats->obj_deleted[i]);
5603
5604                if (created || deleted)
5605                        seq_printf(m, "%s%s: active %d total %d\n",
5606                                prefix,
5607                                binder_objstat_strings[i],
5608                                created - deleted,
5609                                created);
5610        }
5611}
5612
5613static void print_binder_proc_stats(struct seq_file *m,
5614                                    struct binder_proc *proc)
5615{
5616        struct binder_work *w;
5617        struct binder_thread *thread;
5618        struct rb_node *n;
5619        int count, strong, weak, ready_threads;
5620        size_t free_async_space =
5621                binder_alloc_get_free_async_space(&proc->alloc);
5622
5623        seq_printf(m, "proc %d\n", proc->pid);
5624        seq_printf(m, "context %s\n", proc->context->name);
5625        count = 0;
5626        ready_threads = 0;
5627        binder_inner_proc_lock(proc);
5628        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5629                count++;
5630
5631        list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5632                ready_threads++;
5633
5634        seq_printf(m, "  threads: %d\n", count);
5635        seq_printf(m, "  requested threads: %d+%d/%d\n"
5636                        "  ready threads %d\n"
5637                        "  free async space %zd\n", proc->requested_threads,
5638                        proc->requested_threads_started, proc->max_threads,
5639                        ready_threads,
5640                        free_async_space);
5641        count = 0;
5642        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5643                count++;
5644        binder_inner_proc_unlock(proc);
5645        seq_printf(m, "  nodes: %d\n", count);
5646        count = 0;
5647        strong = 0;
5648        weak = 0;
5649        binder_proc_lock(proc);
5650        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5651                struct binder_ref *ref = rb_entry(n, struct binder_ref,
5652                                                  rb_node_desc);
5653                count++;
5654                strong += ref->data.strong;
5655                weak += ref->data.weak;
5656        }
5657        binder_proc_unlock(proc);
5658        seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5659
5660        count = binder_alloc_get_allocated_count(&proc->alloc);
5661        seq_printf(m, "  buffers: %d\n", count);
5662
5663        binder_alloc_print_pages(m, &proc->alloc);
5664
5665        count = 0;
5666        binder_inner_proc_lock(proc);
5667        list_for_each_entry(w, &proc->todo, entry) {
5668                if (w->type == BINDER_WORK_TRANSACTION)
5669                        count++;
5670        }
5671        binder_inner_proc_unlock(proc);
5672        seq_printf(m, "  pending transactions: %d\n", count);
5673
5674        print_binder_stats(m, "  ", &proc->stats);
5675}
5676
5677
5678static int state_show(struct seq_file *m, void *unused)
5679{
5680        struct binder_proc *proc;
5681        struct binder_node *node;
5682        struct binder_node *last_node = NULL;
5683
5684        seq_puts(m, "binder state:\n");
5685
5686        spin_lock(&binder_dead_nodes_lock);
5687        if (!hlist_empty(&binder_dead_nodes))
5688                seq_puts(m, "dead nodes:\n");
5689        hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5690                /*
5691                 * take a temporary reference on the node so it
5692                 * survives and isn't removed from the list
5693                 * while we print it.
5694                 */
5695                node->tmp_refs++;
5696                spin_unlock(&binder_dead_nodes_lock);
5697                if (last_node)
5698                        binder_put_node(last_node);
5699                binder_node_lock(node);
5700                print_binder_node_nilocked(m, node);
5701                binder_node_unlock(node);
5702                last_node = node;
5703                spin_lock(&binder_dead_nodes_lock);
5704        }
5705        spin_unlock(&binder_dead_nodes_lock);
5706        if (last_node)
5707                binder_put_node(last_node);
5708
5709        mutex_lock(&binder_procs_lock);
5710        hlist_for_each_entry(proc, &binder_procs, proc_node)
5711                print_binder_proc(m, proc, 1);
5712        mutex_unlock(&binder_procs_lock);
5713
5714        return 0;
5715}
5716
5717static int stats_show(struct seq_file *m, void *unused)
5718{
5719        struct binder_proc *proc;
5720
5721        seq_puts(m, "binder stats:\n");
5722
5723        print_binder_stats(m, "", &binder_stats);
5724
5725        mutex_lock(&binder_procs_lock);
5726        hlist_for_each_entry(proc, &binder_procs, proc_node)
5727                print_binder_proc_stats(m, proc);
5728        mutex_unlock(&binder_procs_lock);
5729
5730        return 0;
5731}
5732
5733static int transactions_show(struct seq_file *m, void *unused)
5734{
5735        struct binder_proc *proc;
5736
5737        seq_puts(m, "binder transactions:\n");
5738        mutex_lock(&binder_procs_lock);
5739        hlist_for_each_entry(proc, &binder_procs, proc_node)
5740                print_binder_proc(m, proc, 0);
5741        mutex_unlock(&binder_procs_lock);
5742
5743        return 0;
5744}
5745
5746static int proc_show(struct seq_file *m, void *unused)
5747{
5748        struct binder_proc *itr;
5749        int pid = (unsigned long)m->private;
5750
5751        mutex_lock(&binder_procs_lock);
5752        hlist_for_each_entry(itr, &binder_procs, proc_node) {
5753                if (itr->pid == pid) {
5754                        seq_puts(m, "binder proc state:\n");
5755                        print_binder_proc(m, itr, 1);
5756                }
5757        }
5758        mutex_unlock(&binder_procs_lock);
5759
5760        return 0;
5761}
5762
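/*
 * Log entries are filled in without locking, so debug_id_done is sampled
 * before and after printing (with read barriers pairing against write
 * barriers on the logging side); if it changed, the line is tagged
 * "(incomplete)".
 */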
5763static void print_binder_transaction_log_entry(struct seq_file *m,
5764                                        struct binder_transaction_log_entry *e)
5765{
5766        int debug_id = READ_ONCE(e->debug_id_done);
5767        /*
5768         * read barrier to guarantee debug_id_done is read before
5769         * we print the log values
5770         */
5771        smp_rmb();
5772        seq_printf(m,
5773                   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5774                   e->debug_id, (e->call_type == 2) ? "reply" :
5775                   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5776                   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5777                   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5778                   e->return_error, e->return_error_param,
5779                   e->return_error_line);
5780        /*
5781         * read barrier to guarantee debug_id_done is read again
5782         * only after we are done printing the fields of the entry
5783         */
5784        smp_rmb();
5785        seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5786                        "\n" : " (incomplete)\n");
5787}
5788
5789static int transaction_log_show(struct seq_file *m, void *unused)
5790{
5791        struct binder_transaction_log *log = m->private;
5792        unsigned int log_cur = atomic_read(&log->cur);
5793        unsigned int count;
5794        unsigned int cur;
5795        int i;
5796
5797        count = log_cur + 1;
5798        cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5799                0 : count % ARRAY_SIZE(log->entry);
5800        if (count > ARRAY_SIZE(log->entry) || log->full)
5801                count = ARRAY_SIZE(log->entry);
5802        for (i = 0; i < count; i++) {
5803                unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5804
5805                print_binder_transaction_log_entry(m, &log->entry[index]);
5806        }
5807        return 0;
5808}
5809
5810const struct file_operations binder_fops = {
5811        .owner = THIS_MODULE,
5812        .poll = binder_poll,
5813        .unlocked_ioctl = binder_ioctl,
5814        .compat_ioctl = binder_ioctl,
5815        .mmap = binder_mmap,
5816        .open = binder_open,
5817        .flush = binder_flush,
5818        .release = binder_release,
5819};
5820
5821DEFINE_SHOW_ATTRIBUTE(state);
5822DEFINE_SHOW_ATTRIBUTE(stats);
5823DEFINE_SHOW_ATTRIBUTE(transactions);
5824DEFINE_SHOW_ATTRIBUTE(transaction_log);
5825
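/*
 * init_binder_device() - register one binder misc device
 * @name:	device name from the binder_devices module parameter
 *
 * Allocates the binder_device, initializes its context and registers the
 * miscdevice with a dynamic minor before adding it to binder_devices.
 */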
5826static int __init init_binder_device(const char *name)
5827{
5828        int ret;
5829        struct binder_device *binder_device;
5830
5831        binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5832        if (!binder_device)
5833                return -ENOMEM;
5834
5835        binder_device->miscdev.fops = &binder_fops;
5836        binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5837        binder_device->miscdev.name = name;
5838
5839        binder_device->context.binder_context_mgr_uid = INVALID_UID;
5840        binder_device->context.name = name;
5841        mutex_init(&binder_device->context.context_mgr_node_lock);
5842
5843        ret = misc_register(&binder_device->miscdev);
5844        if (ret < 0) {
5845                kfree(binder_device);
5846                return ret;
5847        }
5848
5849        hlist_add_head(&binder_device->hlist, &binder_devices);
5850
5851        return ret;
5852}
5853
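/*
 * Module init: set up the allocator shrinker, the debugfs hierarchy,
 * one device per name in the binder_devices parameter, and binderfs.
 * Devices registered before a failure are torn down on the error path.
 */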
5854static int __init binder_init(void)
5855{
5856        int ret;
5857        char *device_name, *device_tmp;
5858        struct binder_device *device;
5859        struct hlist_node *tmp;
5860        char *device_names = NULL;
5861
5862        ret = binder_alloc_shrinker_init();
5863        if (ret)
5864                return ret;
5865
5866        atomic_set(&binder_transaction_log.cur, ~0U);
5867        atomic_set(&binder_transaction_log_failed.cur, ~0U);
5868
5869        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5870        if (binder_debugfs_dir_entry_root)
5871                binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5872                                                 binder_debugfs_dir_entry_root);
5873
5874        if (binder_debugfs_dir_entry_root) {
5875                debugfs_create_file("state",
5876                                    0444,
5877                                    binder_debugfs_dir_entry_root,
5878                                    NULL,
5879                                    &state_fops);
5880                debugfs_create_file("stats",
5881                                    0444,
5882                                    binder_debugfs_dir_entry_root,
5883                                    NULL,
5884                                    &stats_fops);
5885                debugfs_create_file("transactions",
5886                                    0444,
5887                                    binder_debugfs_dir_entry_root,
5888                                    NULL,
5889                                    &transactions_fops);
5890                debugfs_create_file("transaction_log",
5891                                    0444,
5892                                    binder_debugfs_dir_entry_root,
5893                                    &binder_transaction_log,
5894                                    &transaction_log_fops);
5895                debugfs_create_file("failed_transaction_log",
5896                                    0444,
5897                                    binder_debugfs_dir_entry_root,
5898                                    &binder_transaction_log_failed,
5899                                    &transaction_log_fops);
5900        }
5901
5902        if (strcmp(binder_devices_param, "") != 0) {
5903                /*
5904                 * Copy the module parameter string, because we don't want to
5905                 * tokenize it in-place.
5906                 */
5907                device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5908                if (!device_names) {
5909                        ret = -ENOMEM;
5910                        goto err_alloc_device_names_failed;
5911                }
5912
5913                device_tmp = device_names;
5914                while ((device_name = strsep(&device_tmp, ","))) {
5915                        ret = init_binder_device(device_name);
5916                        if (ret)
5917                                goto err_init_binder_device_failed;
5918                }
5919        }
5920
5921        ret = init_binderfs();
5922        if (ret)
5923                goto err_init_binder_device_failed;
5924
5925        return ret;
5926
5927err_init_binder_device_failed:
5928        hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5929                misc_deregister(&device->miscdev);
5930                hlist_del(&device->hlist);
5931                kfree(device);
5932        }
5933
5934        kfree(device_names);
5935
5936err_alloc_device_names_failed:
5937        debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5938
5939        return ret;
5940}
5941
5942device_initcall(binder_init);
5943
5944#define CREATE_TRACE_POINTS
5945#include "binder_trace.h"
5946
5947MODULE_LICENSE("GPL v2");
5948