/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be distributed and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
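
/*
 * For illustration: BINDER_DEBUG_ENTRY(proc) above expands to a
 * binder_proc_open() that routes binder_proc_show() through single_open(),
 * plus a matching binder_proc_fops file_operations table, i.e. roughly:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *	static const struct file_operations binder_proc_fops = { ... };
 */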

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
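
/*
 * debug_mask is a writable module parameter, so the set of logged events
 * can be changed at runtime; for illustration (assuming the module is
 * named "binder"):
 *
 *	echo 0x247 > /sys/module/binder/parameters/debug_mask
 *
 * enables USER_ERROR | FAILED_TRANSACTION | DEAD_TRANSACTION |
 * READ_WRITE | TRANSACTION (0x1 + 0x2 + 0x4 + 0x40 + 0x200).
 */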

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
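
/*
 * The two logs above are fixed-size ring buffers:
 * binder_transaction_log_add() hands out slots in order and wraps after
 * 32 entries, leaving "full" set. Once wrapped, the chronological order
 * of entries is entry[next..31] followed by entry[0..next-1].
 */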

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	uint8_t data[0];
};
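
/*
 * Layout note: data[0] is a flexible array member, so each binder_buffer
 * header is immediately followed by its payload, which consists of three
 * back-to-back regions, each aligned to sizeof(void *):
 *
 *	data: [ transaction data | offsets array | extra (sg) buffers ]
 *	       <-- data_size --> <-offsets_size-> <-extra_buffers_size->
 *
 * binder_alloc_buf() and binder_transaction() below rely on this layout
 * when computing off_start and sg_bufp.
 */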

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};
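
/*
 * from_parent/to_parent chain transactions into per-thread stacks
 * (thread->transaction_stack). This is what tracks nested synchronous
 * calls: if A calls B and B calls back into A before replying, the
 * driver can walk from_parent to find which thread must receive each
 * reply (see binder_pop_transaction() and the reply path in
 * binder_transaction()).
 */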

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
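
/*
 * Note that the three helpers above operate on the *target* process's
 * file table (proc->files), not current's: binder_translate_fd() runs in
 * the sender's context but must reserve and install descriptors in the
 * receiving process.
 */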

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}
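
/*
 * Buffer sizes are implicit: buffers live address-ordered on
 * proc->buffers, so a buffer's usable size is the gap between its data[]
 * and the next buffer's header (or the end of the mapping for the last
 * buffer). For illustration, with a header at 0x1000 whose data[] starts
 * at 0x1028 and the next header at 0x1400, the size is 0x3d8 bytes.
 */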

static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
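
/*
 * The transaction area is double-mapped: once in the kernel and once in
 * the receiving process, at addresses that differ by the constant
 * proc->user_buffer_offset. binder_buffer_lookup() inverts that mapping:
 * subtract user_buffer_offset to get the kernel view, subtract
 * offsetof(struct binder_buffer, data) to step from the payload back to
 * its header, then validate against the allocated_buffers tree.
 */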

static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
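
/*
 * The error labels above sit *inside* the free_range loop on purpose: on
 * a mid-loop failure, control jumps to the label matching the last step
 * that succeeded for the current page, then the loop continues backwards
 * tearing down every previously completed page, so the allocation and
 * free paths share one unwind sequence.
 */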

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size,
					      size_t extra_buffers_size,
					      int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				  proc->pid, data_size, offsets_size);
		return NULL;
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
				  proc->pid, extra_buffers_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			     proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		     proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		     proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
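
/*
 * This is a best-fit allocator: free_buffers is ordered by size, so the
 * walk above finds the smallest free buffer that fits. If the leftover
 * tail is big enough to hold another binder_buffer header (plus 4 bytes),
 * it is split off and re-inserted as a new free buffer; otherwise the
 * whole chunk is handed out to avoid unusable slivers. Physical pages are
 * only populated for the range the buffer actually covers.
 */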

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			     proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				     proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		     proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
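
/*
 * Freeing coalesces with both neighbors: a free successor is merged into
 * this buffer, and if the predecessor is free as well, this buffer is
 * dissolved into it and the predecessor (now spanning both) is
 * re-inserted into the size-ordered free tree. Pages no longer covered by
 * any buffer are released via binder_update_page_range(..., 0, ...).
 */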

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
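
/*
 * Handle values are per-process and dense: descriptor 0 is reserved for
 * the context manager (typically servicemanager), and every other new ref
 * gets the lowest descriptor not already taken, found by the linear walk
 * over refs_by_desc above. Each ref is indexed twice: by node (for
 * translating objects into this process) and by desc (for resolving
 * handles coming from userspace).
 */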

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object
 * @buffer:	binder_buffer to be accessed
 * @offset:	offset in the buffer at which to validate an object
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header there */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in a buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset+size)
 *
 * Examples of what is not allowed:
 *
 * FD fixups out of order: (Ok if reordered, which is not the case here)
 * A
 *   B (parent = A, offset = 16)
 *   C (parent = A, offset = 0)
 *
 * FD fixups with a parent that is not the last object or any of its parents:
 * A
 *   B (parent = A, offset = 16)
 *   C (parent = A, offset = 0)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents.
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				proc->user_buffer_offset;

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
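
/*
 * This is the core of binder's capability passing: a process sends one of
 * its own objects as BINDER_TYPE_BINDER (raw pointer + cookie), and the
 * driver rewrites it in flight into a BINDER_TYPE_HANDLE whose value is a
 * descriptor in the *receiver's* ref table. Neither side ever sees the
 * other's raw pointers, and a receiver can only name objects the driver
 * has explicitly granted it a ref for.
 */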

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
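
/*
 * fd translation is effectively a dup() across processes: fget() pins the
 * sender's struct file, a free descriptor is reserved in the *target's*
 * file table (subject to the target's RLIMIT_NOFILE and an LSM check),
 * and the file is installed there with O_CLOEXEC. The sender's own fd is
 * left untouched.
 */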

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			       target_proc->user_buffer_offset);
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

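/*
 * binder_transaction() below is the heart of the driver. For a
 * BC_TRANSACTION it resolves the target node (handle lookup, or the
 * context manager for handle 0), allocates a buffer in the *target's*
 * mapping, copies in the payload and offset array, and walks the offsets
 * translating every embedded object (binder_translate_binder/handle/fd/
 * fd_array, plus binder_fixup_parent() for scatter-gather buffers). For a
 * BC_REPLY it instead unwinds the caller's transaction stack to find the
 * thread blocked waiting for the reply.
 */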
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2117 proc->pid, thread->pid);
2118 return_error = BR_FAILED_REPLY;
2119 goto err_bad_parent;
2120 }
2121 if (!binder_validate_fixup(t->buffer, off_start,
2122 parent, fda->parent_offset,
2123 last_fixup_obj,
2124 last_fixup_min_off)) {
2125 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2126 proc->pid, thread->pid);
2127 return_error = BR_FAILED_REPLY;
2128 goto err_bad_parent;
2129 }
2130 ret = binder_translate_fd_array(fda, parent, t, thread,
2131 in_reply_to);
2132 if (ret < 0) {
2133 return_error = BR_FAILED_REPLY;
2134 goto err_translate_failed;
2135 }
2136 last_fixup_obj = parent;
2137 last_fixup_min_off =
2138 fda->parent_offset + sizeof(u32) * fda->num_fds;
2139 } break;
2140 case BINDER_TYPE_PTR: {
2141 struct binder_buffer_object *bp =
2142 to_binder_buffer_object(hdr);
2143 size_t buf_left = sg_buf_end - sg_bufp;
2144
2145 if (bp->length > buf_left) {
2146 binder_user_error("%d:%d got transaction with too large buffer\n",
2147 proc->pid, thread->pid);
2148 return_error = BR_FAILED_REPLY;
2149 goto err_bad_offset;
2150 }
2151 if (copy_from_user(sg_bufp,
2152 (const void __user *)(uintptr_t)
2153 bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
2155 proc->pid, thread->pid);
2156 return_error = BR_FAILED_REPLY;
2157 goto err_copy_data_failed;
2158 }
2159
2160 bp->buffer = (uintptr_t)sg_bufp +
2161 target_proc->user_buffer_offset;
2162 sg_bufp += ALIGN(bp->length, sizeof(u64));
2163
2164 ret = binder_fixup_parent(t, thread, bp, off_start,
2165 offp - off_start,
2166 last_fixup_obj,
2167 last_fixup_min_off);
2168 if (ret < 0) {
2169 return_error = BR_FAILED_REPLY;
2170 goto err_translate_failed;
2171 }
2172 last_fixup_obj = bp;
2173 last_fixup_min_off = 0;
2174 } break;
2175 default:
2176 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2177 proc->pid, thread->pid, hdr->type);
2178 return_error = BR_FAILED_REPLY;
2179 goto err_bad_object_type;
2180 }
2181 }
2182 if (reply) {
2183 BUG_ON(t->buffer->async_transaction != 0);
2184 binder_pop_transaction(target_thread, in_reply_to);
2185 } else if (!(t->flags & TF_ONE_WAY)) {
2186 BUG_ON(t->buffer->async_transaction != 0);
2187 t->need_reply = 1;
2188 t->from_parent = thread->transaction_stack;
2189 thread->transaction_stack = t;
2190 } else {
2191 BUG_ON(target_node == NULL);
2192 BUG_ON(t->buffer->async_transaction != 1);
2193 if (target_node->has_async_transaction) {
2194 target_list = &target_node->async_todo;
2195 target_wait = NULL;
2196 } else
2197 target_node->has_async_transaction = 1;
2198 }
2199 t->work.type = BINDER_WORK_TRANSACTION;
2200 list_add_tail(&t->work.entry, target_list);
2201 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2202 list_add_tail(&tcomplete->entry, &thread->todo);
2203 if (target_wait) {
2204 if (reply || !(t->flags & TF_ONE_WAY))
2205 wake_up_interruptible_sync(target_wait);
2206 else
2207 wake_up_interruptible(target_wait);
2208 }
2209 return;
2210
2211err_translate_failed:
2212err_bad_object_type:
2213err_bad_offset:
2214err_bad_parent:
2215err_copy_data_failed:
2216 trace_binder_transaction_failed_buffer_release(t->buffer);
2217 binder_transaction_buffer_release(target_proc, t->buffer, offp);
2218 t->buffer->transaction = NULL;
2219 binder_free_buf(target_proc, t->buffer);
2220err_binder_alloc_buf_failed:
2221 kfree(tcomplete);
2222 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2223err_alloc_tcomplete_failed:
2224 kfree(t);
2225 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2226err_alloc_t_failed:
2227err_bad_call_stack:
2228err_empty_call_stack:
2229err_dead_binder:
2230err_invalid_target_handle:
2231err_no_context_mgr_node:
2232 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2233 "%d:%d transaction failed %d, size %lld-%lld\n",
2234 proc->pid, thread->pid, return_error,
2235 (u64)tr->data_size, (u64)tr->offsets_size);
2236
2237 {
2238 struct binder_transaction_log_entry *fe;
2239
2240 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2241 *fe = *e;
2242 }
2243
2244 BUG_ON(thread->return_error != BR_OK);
2245 if (in_reply_to) {
2246 thread->return_error = BR_TRANSACTION_COMPLETE;
2247 binder_send_failed_reply(in_reply_to, return_error);
2248 } else
2249 thread->return_error = return_error;
2250}
2251
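/*
 * binder_thread_write() - consume BC_* commands from the write buffer.
 * Parses commands from the userspace buffer until it is exhausted or an
 * error return is pending for the thread, handling reference counting,
 * buffer frees, transactions, looper registration and death
 * notifications. *consumed is advanced after every command so that, on
 * error, userspace can tell how far processing got.
 */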
2252static int binder_thread_write(struct binder_proc *proc,
2253 struct binder_thread *thread,
2254 binder_uintptr_t binder_buffer, size_t size,
2255 binder_size_t *consumed)
2256{
2257 uint32_t cmd;
2258 struct binder_context *context = proc->context;
2259 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2260 void __user *ptr = buffer + *consumed;
2261 void __user *end = buffer + size;
2262
2263 while (ptr < end && thread->return_error == BR_OK) {
2264 if (get_user(cmd, (uint32_t __user *)ptr))
2265 return -EFAULT;
2266 ptr += sizeof(uint32_t);
2267 trace_binder_command(cmd);
2268 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
2269 binder_stats.bc[_IOC_NR(cmd)]++;
2270 proc->stats.bc[_IOC_NR(cmd)]++;
2271 thread->stats.bc[_IOC_NR(cmd)]++;
2272 }
2273 switch (cmd) {
2274 case BC_INCREFS:
2275 case BC_ACQUIRE:
2276 case BC_RELEASE:
2277 case BC_DECREFS: {
2278 uint32_t target;
2279 struct binder_ref *ref;
2280 const char *debug_string;
2281
2282 if (get_user(target, (uint32_t __user *)ptr))
2283 return -EFAULT;
2284 ptr += sizeof(uint32_t);
2285 if (target == 0 && context->binder_context_mgr_node &&
2286 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
2287 ref = binder_get_ref_for_node(proc,
2288 context->binder_context_mgr_node);
				if (ref && ref->desc != target) {
2290 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2291 proc->pid, thread->pid,
2292 ref->desc);
2293 }
2294 } else
2295 ref = binder_get_ref(proc, target,
2296 cmd == BC_ACQUIRE ||
2297 cmd == BC_RELEASE);
2298 if (ref == NULL) {
2299 binder_user_error("%d:%d refcount change on invalid ref %d\n",
2300 proc->pid, thread->pid, target);
2301 break;
2302 }
2303 switch (cmd) {
2304 case BC_INCREFS:
2305 debug_string = "IncRefs";
2306 binder_inc_ref(ref, 0, NULL);
2307 break;
2308 case BC_ACQUIRE:
2309 debug_string = "Acquire";
2310 binder_inc_ref(ref, 1, NULL);
2311 break;
2312 case BC_RELEASE:
2313 debug_string = "Release";
2314 binder_dec_ref(ref, 1);
2315 break;
2316 case BC_DECREFS:
2317 default:
2318 debug_string = "DecRefs";
2319 binder_dec_ref(ref, 0);
2320 break;
2321 }
2322 binder_debug(BINDER_DEBUG_USER_REFS,
2323 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
2324 proc->pid, thread->pid, debug_string, ref->debug_id,
2325 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2326 break;
2327 }
2328 case BC_INCREFS_DONE:
2329 case BC_ACQUIRE_DONE: {
2330 binder_uintptr_t node_ptr;
2331 binder_uintptr_t cookie;
2332 struct binder_node *node;
2333
2334 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2335 return -EFAULT;
2336 ptr += sizeof(binder_uintptr_t);
2337 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2338 return -EFAULT;
2339 ptr += sizeof(binder_uintptr_t);
2340 node = binder_get_node(proc, node_ptr);
2341 if (node == NULL) {
2342 binder_user_error("%d:%d %s u%016llx no match\n",
2343 proc->pid, thread->pid,
2344 cmd == BC_INCREFS_DONE ?
2345 "BC_INCREFS_DONE" :
2346 "BC_ACQUIRE_DONE",
2347 (u64)node_ptr);
2348 break;
2349 }
2350 if (cookie != node->cookie) {
2351 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2352 proc->pid, thread->pid,
2353 cmd == BC_INCREFS_DONE ?
2354 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2355 (u64)node_ptr, node->debug_id,
2356 (u64)cookie, (u64)node->cookie);
2357 break;
2358 }
2359 if (cmd == BC_ACQUIRE_DONE) {
2360 if (node->pending_strong_ref == 0) {
2361 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2362 proc->pid, thread->pid,
2363 node->debug_id);
2364 break;
2365 }
2366 node->pending_strong_ref = 0;
2367 } else {
2368 if (node->pending_weak_ref == 0) {
2369 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2370 proc->pid, thread->pid,
2371 node->debug_id);
2372 break;
2373 }
2374 node->pending_weak_ref = 0;
2375 }
2376 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2377 binder_debug(BINDER_DEBUG_USER_REFS,
2378 "%d:%d %s node %d ls %d lw %d\n",
2379 proc->pid, thread->pid,
2380 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2381 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2382 break;
2383 }
2384 case BC_ATTEMPT_ACQUIRE:
2385 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2386 return -EINVAL;
2387 case BC_ACQUIRE_RESULT:
2388 pr_err("BC_ACQUIRE_RESULT not supported\n");
2389 return -EINVAL;
2390
2391 case BC_FREE_BUFFER: {
2392 binder_uintptr_t data_ptr;
2393 struct binder_buffer *buffer;
2394
2395 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2396 return -EFAULT;
2397 ptr += sizeof(binder_uintptr_t);
2398
2399 buffer = binder_buffer_lookup(proc, data_ptr);
2400 if (buffer == NULL) {
2401 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2402 proc->pid, thread->pid, (u64)data_ptr);
2403 break;
2404 }
2405 if (!buffer->allow_user_free) {
2406 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2407 proc->pid, thread->pid, (u64)data_ptr);
2408 break;
2409 }
2410 binder_debug(BINDER_DEBUG_FREE_BUFFER,
2411 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2412 proc->pid, thread->pid, (u64)data_ptr,
2413 buffer->debug_id,
2414 buffer->transaction ? "active" : "finished");
2415
2416 if (buffer->transaction) {
2417 buffer->transaction->buffer = NULL;
2418 buffer->transaction = NULL;
2419 }
2420 if (buffer->async_transaction && buffer->target_node) {
2421 BUG_ON(!buffer->target_node->has_async_transaction);
2422 if (list_empty(&buffer->target_node->async_todo))
2423 buffer->target_node->has_async_transaction = 0;
2424 else
2425 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2426 }
2427 trace_binder_transaction_buffer_release(buffer);
2428 binder_transaction_buffer_release(proc, buffer, NULL);
2429 binder_free_buf(proc, buffer);
2430 break;
2431 }
2432
2433 case BC_TRANSACTION_SG:
2434 case BC_REPLY_SG: {
2435 struct binder_transaction_data_sg tr;
2436
2437 if (copy_from_user(&tr, ptr, sizeof(tr)))
2438 return -EFAULT;
2439 ptr += sizeof(tr);
2440 binder_transaction(proc, thread, &tr.transaction_data,
2441 cmd == BC_REPLY_SG, tr.buffers_size);
2442 break;
2443 }
2444 case BC_TRANSACTION:
2445 case BC_REPLY: {
2446 struct binder_transaction_data tr;
2447
2448 if (copy_from_user(&tr, ptr, sizeof(tr)))
2449 return -EFAULT;
2450 ptr += sizeof(tr);
2451 binder_transaction(proc, thread, &tr,
2452 cmd == BC_REPLY, 0);
2453 break;
2454 }
2455
2456 case BC_REGISTER_LOOPER:
2457 binder_debug(BINDER_DEBUG_THREADS,
2458 "%d:%d BC_REGISTER_LOOPER\n",
2459 proc->pid, thread->pid);
2460 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2461 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2462 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2463 proc->pid, thread->pid);
2464 } else if (proc->requested_threads == 0) {
2465 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2466 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2467 proc->pid, thread->pid);
2468 } else {
2469 proc->requested_threads--;
2470 proc->requested_threads_started++;
2471 }
2472 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2473 break;
2474 case BC_ENTER_LOOPER:
2475 binder_debug(BINDER_DEBUG_THREADS,
2476 "%d:%d BC_ENTER_LOOPER\n",
2477 proc->pid, thread->pid);
2478 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2479 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2480 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2481 proc->pid, thread->pid);
2482 }
2483 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2484 break;
2485 case BC_EXIT_LOOPER:
2486 binder_debug(BINDER_DEBUG_THREADS,
2487 "%d:%d BC_EXIT_LOOPER\n",
2488 proc->pid, thread->pid);
2489 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2490 break;
2491
2492 case BC_REQUEST_DEATH_NOTIFICATION:
2493 case BC_CLEAR_DEATH_NOTIFICATION: {
2494 uint32_t target;
2495 binder_uintptr_t cookie;
2496 struct binder_ref *ref;
2497 struct binder_ref_death *death;
2498
2499 if (get_user(target, (uint32_t __user *)ptr))
2500 return -EFAULT;
2501 ptr += sizeof(uint32_t);
2502 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2503 return -EFAULT;
2504 ptr += sizeof(binder_uintptr_t);
2505 ref = binder_get_ref(proc, target, false);
2506 if (ref == NULL) {
2507 binder_user_error("%d:%d %s invalid ref %d\n",
2508 proc->pid, thread->pid,
2509 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2510 "BC_REQUEST_DEATH_NOTIFICATION" :
2511 "BC_CLEAR_DEATH_NOTIFICATION",
2512 target);
2513 break;
2514 }
2515
2516 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2517 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2518 proc->pid, thread->pid,
2519 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2520 "BC_REQUEST_DEATH_NOTIFICATION" :
2521 "BC_CLEAR_DEATH_NOTIFICATION",
2522 (u64)cookie, ref->debug_id, ref->desc,
2523 ref->strong, ref->weak, ref->node->debug_id);
2524
2525 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2526 if (ref->death) {
2527 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2528 proc->pid, thread->pid);
2529 break;
2530 }
2531 death = kzalloc(sizeof(*death), GFP_KERNEL);
2532 if (death == NULL) {
2533 thread->return_error = BR_ERROR;
2534 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2535 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2536 proc->pid, thread->pid);
2537 break;
2538 }
2539 binder_stats_created(BINDER_STAT_DEATH);
2540 INIT_LIST_HEAD(&death->work.entry);
2541 death->cookie = cookie;
2542 ref->death = death;
2543 if (ref->node->proc == NULL) {
2544 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2545 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2546 list_add_tail(&ref->death->work.entry, &thread->todo);
2547 } else {
2548 list_add_tail(&ref->death->work.entry, &proc->todo);
2549 wake_up_interruptible(&proc->wait);
2550 }
2551 }
2552 } else {
2553 if (ref->death == NULL) {
2554 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2555 proc->pid, thread->pid);
2556 break;
2557 }
2558 death = ref->death;
2559 if (death->cookie != cookie) {
2560 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2561 proc->pid, thread->pid,
2562 (u64)death->cookie,
2563 (u64)cookie);
2564 break;
2565 }
2566 ref->death = NULL;
2567 if (list_empty(&death->work.entry)) {
2568 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2569 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2570 list_add_tail(&death->work.entry, &thread->todo);
2571 } else {
2572 list_add_tail(&death->work.entry, &proc->todo);
2573 wake_up_interruptible(&proc->wait);
2574 }
2575 } else {
2576 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2577 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2578 }
2579 }
2580 } break;
2581 case BC_DEAD_BINDER_DONE: {
2582 struct binder_work *w;
2583 binder_uintptr_t cookie;
2584 struct binder_ref_death *death = NULL;
2585
2586 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2587 return -EFAULT;
2588
2589 ptr += sizeof(cookie);
2590 list_for_each_entry(w, &proc->delivered_death, entry) {
2591 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2592
2593 if (tmp_death->cookie == cookie) {
2594 death = tmp_death;
2595 break;
2596 }
2597 }
2598 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2599 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2600 proc->pid, thread->pid, (u64)cookie,
2601 death);
2602 if (death == NULL) {
2603 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2604 proc->pid, thread->pid, (u64)cookie);
2605 break;
2606 }
2607
2608 list_del_init(&death->work.entry);
2609 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2610 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2611 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2612 list_add_tail(&death->work.entry, &thread->todo);
2613 } else {
2614 list_add_tail(&death->work.entry, &proc->todo);
2615 wake_up_interruptible(&proc->wait);
2616 }
2617 }
2618 } break;
2619
2620 default:
2621 pr_err("%d:%d unknown command %d\n",
2622 proc->pid, thread->pid, cmd);
2623 return -EINVAL;
2624 }
2625 *consumed = ptr - buffer;
2626 }
2627 return 0;
2628}
2629
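/*
 * binder_stat_br() - record a BR_* return code in the global,
 * per-process and per-thread statistics and emit the binder_return
 * trace event.
 */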
2630static void binder_stat_br(struct binder_proc *proc,
2631 struct binder_thread *thread, uint32_t cmd)
2632{
2633 trace_binder_return(cmd);
2634 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2635 binder_stats.br[_IOC_NR(cmd)]++;
2636 proc->stats.br[_IOC_NR(cmd)]++;
2637 thread->stats.br[_IOC_NR(cmd)]++;
2638 }
2639}
2640
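/*
 * Wakeup conditions used by binder_thread_read(): a thread parked on
 * the process queue wakes for process-wide work or a pending forced
 * return; a thread parked on its own queue also wakes when an error
 * return is pending.
 */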
2641static int binder_has_proc_work(struct binder_proc *proc,
2642 struct binder_thread *thread)
2643{
2644 return !list_empty(&proc->todo) ||
2645 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2646}
2647
2648static int binder_has_thread_work(struct binder_thread *thread)
2649{
2650 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2651 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2652}
2653
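/*
 * binder_thread_read() - fill the read buffer with BR_* work items.
 * Blocks (unless non_block is set) until work appears on the thread's
 * todo list or, for an idle thread, on the process todo list, then
 * translates each queued work item into return commands and, for
 * transactions, a binder_transaction_data payload. Before returning it
 * may also ask userspace to spawn another looper thread
 * (BR_SPAWN_LOOPER) when no ready threads remain and max_threads has
 * not been reached.
 */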
2654static int binder_thread_read(struct binder_proc *proc,
2655 struct binder_thread *thread,
2656 binder_uintptr_t binder_buffer, size_t size,
2657 binder_size_t *consumed, int non_block)
2658{
2659 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2660 void __user *ptr = buffer + *consumed;
2661 void __user *end = buffer + size;
2662
2663 int ret = 0;
2664 int wait_for_proc_work;
2665
2666 if (*consumed == 0) {
2667 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2668 return -EFAULT;
2669 ptr += sizeof(uint32_t);
2670 }
2671
2672retry:
2673 wait_for_proc_work = thread->transaction_stack == NULL &&
2674 list_empty(&thread->todo);
2675
2676 if (thread->return_error != BR_OK && ptr < end) {
2677 if (thread->return_error2 != BR_OK) {
2678 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2679 return -EFAULT;
2680 ptr += sizeof(uint32_t);
2681 binder_stat_br(proc, thread, thread->return_error2);
2682 if (ptr == end)
2683 goto done;
2684 thread->return_error2 = BR_OK;
2685 }
2686 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2687 return -EFAULT;
2688 ptr += sizeof(uint32_t);
2689 binder_stat_br(proc, thread, thread->return_error);
2690 thread->return_error = BR_OK;
2691 goto done;
2692 }
2693
2695 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2696 if (wait_for_proc_work)
2697 proc->ready_threads++;
2698
2699 binder_unlock(__func__);
2700
2701 trace_binder_wait_for_work(wait_for_proc_work,
2702 !!thread->transaction_stack,
2703 !list_empty(&thread->todo));
2704 if (wait_for_proc_work) {
2705 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2706 BINDER_LOOPER_STATE_ENTERED))) {
2707 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2708 proc->pid, thread->pid, thread->looper);
2709 wait_event_interruptible(binder_user_error_wait,
2710 binder_stop_on_user_error < 2);
2711 }
2712 binder_set_nice(proc->default_priority);
2713 if (non_block) {
2714 if (!binder_has_proc_work(proc, thread))
2715 ret = -EAGAIN;
2716 } else
2717 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2718 } else {
2719 if (non_block) {
2720 if (!binder_has_thread_work(thread))
2721 ret = -EAGAIN;
2722 } else
2723 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2724 }
2725
2726 binder_lock(__func__);
2727
2728 if (wait_for_proc_work)
2729 proc->ready_threads--;
2730 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2731
2732 if (ret)
2733 return ret;
2734
2735 while (1) {
2736 uint32_t cmd;
2737 struct binder_transaction_data tr;
2738 struct binder_work *w;
2739 struct binder_transaction *t = NULL;
2740
2741 if (!list_empty(&thread->todo)) {
2742 w = list_first_entry(&thread->todo, struct binder_work,
2743 entry);
2744 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2745 w = list_first_entry(&proc->todo, struct binder_work,
2746 entry);
2747 } else {
			/* no data added */
2749 if (ptr - buffer == 4 &&
2750 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2751 goto retry;
2752 break;
2753 }
2754
2755 if (end - ptr < sizeof(tr) + 4)
2756 break;
2757
2758 switch (w->type) {
2759 case BINDER_WORK_TRANSACTION: {
2760 t = container_of(w, struct binder_transaction, work);
2761 } break;
2762 case BINDER_WORK_TRANSACTION_COMPLETE: {
2763 cmd = BR_TRANSACTION_COMPLETE;
2764 if (put_user(cmd, (uint32_t __user *)ptr))
2765 return -EFAULT;
2766 ptr += sizeof(uint32_t);
2767
2768 binder_stat_br(proc, thread, cmd);
2769 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2770 "%d:%d BR_TRANSACTION_COMPLETE\n",
2771 proc->pid, thread->pid);
2772
2773 list_del(&w->entry);
2774 kfree(w);
2775 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2776 } break;
2777 case BINDER_WORK_NODE: {
2778 struct binder_node *node = container_of(w, struct binder_node, work);
2779 uint32_t cmd = BR_NOOP;
2780 const char *cmd_name;
2781 int strong = node->internal_strong_refs || node->local_strong_refs;
2782 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2783
2784 if (weak && !node->has_weak_ref) {
2785 cmd = BR_INCREFS;
2786 cmd_name = "BR_INCREFS";
2787 node->has_weak_ref = 1;
2788 node->pending_weak_ref = 1;
2789 node->local_weak_refs++;
2790 } else if (strong && !node->has_strong_ref) {
2791 cmd = BR_ACQUIRE;
2792 cmd_name = "BR_ACQUIRE";
2793 node->has_strong_ref = 1;
2794 node->pending_strong_ref = 1;
2795 node->local_strong_refs++;
2796 } else if (!strong && node->has_strong_ref) {
2797 cmd = BR_RELEASE;
2798 cmd_name = "BR_RELEASE";
2799 node->has_strong_ref = 0;
2800 } else if (!weak && node->has_weak_ref) {
2801 cmd = BR_DECREFS;
2802 cmd_name = "BR_DECREFS";
2803 node->has_weak_ref = 0;
2804 }
2805 if (cmd != BR_NOOP) {
2806 if (put_user(cmd, (uint32_t __user *)ptr))
2807 return -EFAULT;
2808 ptr += sizeof(uint32_t);
2809 if (put_user(node->ptr,
2810 (binder_uintptr_t __user *)ptr))
2811 return -EFAULT;
2812 ptr += sizeof(binder_uintptr_t);
2813 if (put_user(node->cookie,
2814 (binder_uintptr_t __user *)ptr))
2815 return -EFAULT;
2816 ptr += sizeof(binder_uintptr_t);
2817
2818 binder_stat_br(proc, thread, cmd);
2819 binder_debug(BINDER_DEBUG_USER_REFS,
2820 "%d:%d %s %d u%016llx c%016llx\n",
2821 proc->pid, thread->pid, cmd_name,
2822 node->debug_id,
2823 (u64)node->ptr, (u64)node->cookie);
2824 } else {
2825 list_del_init(&w->entry);
2826 if (!weak && !strong) {
2827 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2828 "%d:%d node %d u%016llx c%016llx deleted\n",
2829 proc->pid, thread->pid,
2830 node->debug_id,
2831 (u64)node->ptr,
2832 (u64)node->cookie);
2833 rb_erase(&node->rb_node, &proc->nodes);
2834 kfree(node);
2835 binder_stats_deleted(BINDER_STAT_NODE);
2836 } else {
2837 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2838 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2839 proc->pid, thread->pid,
2840 node->debug_id,
2841 (u64)node->ptr,
2842 (u64)node->cookie);
2843 }
2844 }
2845 } break;
2846 case BINDER_WORK_DEAD_BINDER:
2847 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2848 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2849 struct binder_ref_death *death;
2850 uint32_t cmd;
2851
2852 death = container_of(w, struct binder_ref_death, work);
2853 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2854 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2855 else
2856 cmd = BR_DEAD_BINDER;
2857 if (put_user(cmd, (uint32_t __user *)ptr))
2858 return -EFAULT;
2859 ptr += sizeof(uint32_t);
2860 if (put_user(death->cookie,
2861 (binder_uintptr_t __user *)ptr))
2862 return -EFAULT;
2863 ptr += sizeof(binder_uintptr_t);
2864 binder_stat_br(proc, thread, cmd);
2865 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2866 "%d:%d %s %016llx\n",
2867 proc->pid, thread->pid,
2868 cmd == BR_DEAD_BINDER ?
2869 "BR_DEAD_BINDER" :
2870 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2871 (u64)death->cookie);
2872
2873 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2874 list_del(&w->entry);
2875 kfree(death);
2876 binder_stats_deleted(BINDER_STAT_DEATH);
2877 } else
2878 list_move(&w->entry, &proc->delivered_death);
2879 if (cmd == BR_DEAD_BINDER)
2880 goto done;
2881 } break;
2882 }
2883
2884 if (!t)
2885 continue;
2886
2887 BUG_ON(t->buffer == NULL);
2888 if (t->buffer->target_node) {
2889 struct binder_node *target_node = t->buffer->target_node;
2890
2891 tr.target.ptr = target_node->ptr;
2892 tr.cookie = target_node->cookie;
2893 t->saved_priority = task_nice(current);
2894 if (t->priority < target_node->min_priority &&
2895 !(t->flags & TF_ONE_WAY))
2896 binder_set_nice(t->priority);
2897 else if (!(t->flags & TF_ONE_WAY) ||
2898 t->saved_priority > target_node->min_priority)
2899 binder_set_nice(target_node->min_priority);
2900 cmd = BR_TRANSACTION;
2901 } else {
2902 tr.target.ptr = 0;
2903 tr.cookie = 0;
2904 cmd = BR_REPLY;
2905 }
2906 tr.code = t->code;
2907 tr.flags = t->flags;
2908 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2909
2910 if (t->from) {
2911 struct task_struct *sender = t->from->proc->tsk;
2912
2913 tr.sender_pid = task_tgid_nr_ns(sender,
2914 task_active_pid_ns(current));
2915 } else {
2916 tr.sender_pid = 0;
2917 }
2918
2919 tr.data_size = t->buffer->data_size;
2920 tr.offsets_size = t->buffer->offsets_size;
2921 tr.data.ptr.buffer = (binder_uintptr_t)(
2922 (uintptr_t)t->buffer->data +
2923 proc->user_buffer_offset);
2924 tr.data.ptr.offsets = tr.data.ptr.buffer +
2925 ALIGN(t->buffer->data_size,
2926 sizeof(void *));
2927
2928 if (put_user(cmd, (uint32_t __user *)ptr))
2929 return -EFAULT;
2930 ptr += sizeof(uint32_t);
2931 if (copy_to_user(ptr, &tr, sizeof(tr)))
2932 return -EFAULT;
2933 ptr += sizeof(tr);
2934
2935 trace_binder_transaction_received(t);
2936 binder_stat_br(proc, thread, cmd);
2937 binder_debug(BINDER_DEBUG_TRANSACTION,
2938 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2939 proc->pid, thread->pid,
2940 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2941 "BR_REPLY",
2942 t->debug_id, t->from ? t->from->proc->pid : 0,
2943 t->from ? t->from->pid : 0, cmd,
2944 t->buffer->data_size, t->buffer->offsets_size,
2945 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2946
2947 list_del(&t->work.entry);
2948 t->buffer->allow_user_free = 1;
2949 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2950 t->to_parent = thread->transaction_stack;
2951 t->to_thread = thread;
2952 thread->transaction_stack = t;
2953 } else {
2954 t->buffer->transaction = NULL;
2955 kfree(t);
2956 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2957 }
2958 break;
2959 }
2960
2961done:
2962
2963 *consumed = ptr - buffer;
2964 if (proc->requested_threads + proc->ready_threads == 0 &&
2965 proc->requested_threads_started < proc->max_threads &&
2966 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2967 BINDER_LOOPER_STATE_ENTERED))
2968 ) {
2969 proc->requested_threads++;
2970 binder_debug(BINDER_DEBUG_THREADS,
2971 "%d:%d BR_SPAWN_LOOPER\n",
2972 proc->pid, thread->pid);
2973 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2974 return -EFAULT;
2975 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2976 }
2977 return 0;
2978}
2979
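/*
 * binder_release_work() - drain a todo list that will never be
 * delivered, sending BR_DEAD_REPLY for synchronous transactions and
 * freeing the other known work item types.
 */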
2980static void binder_release_work(struct list_head *list)
2981{
2982 struct binder_work *w;
2983
2984 while (!list_empty(list)) {
2985 w = list_first_entry(list, struct binder_work, entry);
2986 list_del_init(&w->entry);
2987 switch (w->type) {
2988 case BINDER_WORK_TRANSACTION: {
2989 struct binder_transaction *t;
2990
2991 t = container_of(w, struct binder_transaction, work);
2992 if (t->buffer->target_node &&
2993 !(t->flags & TF_ONE_WAY)) {
2994 binder_send_failed_reply(t, BR_DEAD_REPLY);
2995 } else {
2996 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2997 "undelivered transaction %d\n",
2998 t->debug_id);
2999 t->buffer->transaction = NULL;
3000 kfree(t);
3001 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3002 }
3003 } break;
3004 case BINDER_WORK_TRANSACTION_COMPLETE: {
3005 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3006 "undelivered TRANSACTION_COMPLETE\n");
3007 kfree(w);
3008 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3009 } break;
3010 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3011 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3012 struct binder_ref_death *death;
3013
3014 death = container_of(w, struct binder_ref_death, work);
3015 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3016 "undelivered death notification, %016llx\n",
3017 (u64)death->cookie);
3018 kfree(death);
3019 binder_stats_deleted(BINDER_STAT_DEATH);
3020 } break;
3021 default:
3022 pr_err("unexpected work type, %d, not freed\n",
3023 w->type);
3024 break;
3025 }
3026 }
3028}
3029
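/*
 * binder_get_thread() - find the binder_thread for current in proc's
 * rbtree of threads, creating and inserting a new one on first use.
 * Returns NULL only when allocating a new thread structure fails.
 */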
3030static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3031{
3032 struct binder_thread *thread = NULL;
3033 struct rb_node *parent = NULL;
3034 struct rb_node **p = &proc->threads.rb_node;
3035
3036 while (*p) {
3037 parent = *p;
3038 thread = rb_entry(parent, struct binder_thread, rb_node);
3039
3040 if (current->pid < thread->pid)
3041 p = &(*p)->rb_left;
3042 else if (current->pid > thread->pid)
3043 p = &(*p)->rb_right;
3044 else
3045 break;
3046 }
3047 if (*p == NULL) {
3048 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3049 if (thread == NULL)
3050 return NULL;
3051 binder_stats_created(BINDER_STAT_THREAD);
3052 thread->proc = proc;
3053 thread->pid = current->pid;
3054 init_waitqueue_head(&thread->wait);
3055 INIT_LIST_HEAD(&thread->todo);
3056 rb_link_node(&thread->rb_node, parent, p);
3057 rb_insert_color(&thread->rb_node, &proc->threads);
3058 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3059 thread->return_error = BR_OK;
3060 thread->return_error2 = BR_OK;
3061 }
3062 return thread;
3063}
3064
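/*
 * binder_free_thread() - tear down a thread (BINDER_THREAD_EXIT or
 * process release). Unwinds the transaction stack, sends a failed
 * reply for any reply the thread still owed, releases its pending
 * work and returns the number of transactions that were still active.
 */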
3065static int binder_free_thread(struct binder_proc *proc,
3066 struct binder_thread *thread)
3067{
3068 struct binder_transaction *t;
3069 struct binder_transaction *send_reply = NULL;
3070 int active_transactions = 0;
3071
3072 rb_erase(&thread->rb_node, &proc->threads);
3073 t = thread->transaction_stack;
3074 if (t && t->to_thread == thread)
3075 send_reply = t;
3076 while (t) {
3077 active_transactions++;
3078 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3079 "release %d:%d transaction %d %s, still active\n",
3080 proc->pid, thread->pid,
3081 t->debug_id,
3082 (t->to_thread == thread) ? "in" : "out");
3083
3084 if (t->to_thread == thread) {
3085 t->to_proc = NULL;
3086 t->to_thread = NULL;
3087 if (t->buffer) {
3088 t->buffer->transaction = NULL;
3089 t->buffer = NULL;
3090 }
3091 t = t->to_parent;
3092 } else if (t->from == thread) {
3093 t->from = NULL;
3094 t = t->from_parent;
3095 } else
3096 BUG();
3097 }
3098 if (send_reply)
3099 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3100 binder_release_work(&thread->todo);
3101 kfree(thread);
3102 binder_stats_deleted(BINDER_STAT_THREAD);
3103 return active_transactions;
3104}
3105
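/*
 * binder_poll() - poll/select support. Reports POLLIN when the calling
 * thread (or, if the thread is idle, its process) has work pending.
 */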
3106static unsigned int binder_poll(struct file *filp,
3107 struct poll_table_struct *wait)
3108{
3109 struct binder_proc *proc = filp->private_data;
3110 struct binder_thread *thread = NULL;
3111 int wait_for_proc_work;
3112
3113 binder_lock(__func__);
3114
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		binder_unlock(__func__);
		return POLLERR;
	}
3116
3117 wait_for_proc_work = thread->transaction_stack == NULL &&
3118 list_empty(&thread->todo) && thread->return_error == BR_OK;
3119
3120 binder_unlock(__func__);
3121
3122 if (wait_for_proc_work) {
3123 if (binder_has_proc_work(proc, thread))
3124 return POLLIN;
3125 poll_wait(filp, &proc->wait, wait);
3126 if (binder_has_proc_work(proc, thread))
3127 return POLLIN;
3128 } else {
3129 if (binder_has_thread_work(thread))
3130 return POLLIN;
3131 poll_wait(filp, &thread->wait, wait);
3132 if (binder_has_thread_work(thread))
3133 return POLLIN;
3134 }
3135 return 0;
3136}
3137
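/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ. Copies in the
 * binder_write_read descriptor, drains the write buffer, then fills
 * the read buffer, and copies the updated consumed counts back to
 * userspace even when one side fails partway.
 */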
3138static int binder_ioctl_write_read(struct file *filp,
3139 unsigned int cmd, unsigned long arg,
3140 struct binder_thread *thread)
3141{
3142 int ret = 0;
3143 struct binder_proc *proc = filp->private_data;
3144 unsigned int size = _IOC_SIZE(cmd);
3145 void __user *ubuf = (void __user *)arg;
3146 struct binder_write_read bwr;
3147
3148 if (size != sizeof(struct binder_write_read)) {
3149 ret = -EINVAL;
3150 goto out;
3151 }
3152 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3153 ret = -EFAULT;
3154 goto out;
3155 }
3156 binder_debug(BINDER_DEBUG_READ_WRITE,
3157 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3158 proc->pid, thread->pid,
3159 (u64)bwr.write_size, (u64)bwr.write_buffer,
3160 (u64)bwr.read_size, (u64)bwr.read_buffer);
3161
3162 if (bwr.write_size > 0) {
3163 ret = binder_thread_write(proc, thread,
3164 bwr.write_buffer,
3165 bwr.write_size,
3166 &bwr.write_consumed);
3167 trace_binder_write_done(ret);
3168 if (ret < 0) {
3169 bwr.read_consumed = 0;
3170 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3171 ret = -EFAULT;
3172 goto out;
3173 }
3174 }
3175 if (bwr.read_size > 0) {
3176 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3177 bwr.read_size,
3178 &bwr.read_consumed,
3179 filp->f_flags & O_NONBLOCK);
3180 trace_binder_read_done(ret);
3181 if (!list_empty(&proc->todo))
3182 wake_up_interruptible(&proc->wait);
3183 if (ret < 0) {
3184 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3185 ret = -EFAULT;
3186 goto out;
3187 }
3188 }
3189 binder_debug(BINDER_DEBUG_READ_WRITE,
3190 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3191 proc->pid, thread->pid,
3192 (u64)bwr.write_consumed, (u64)bwr.write_size,
3193 (u64)bwr.read_consumed, (u64)bwr.read_size);
3194 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3195 ret = -EFAULT;
3196 goto out;
3197 }
3198out:
3199 return ret;
3200}
3201
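/*
 * binder_ioctl_set_ctx_mgr() - make the calling process the context
 * manager (node 0) for this binder device. Fails with -EBUSY if a
 * context manager node already exists and -EPERM if a different euid
 * claimed the role earlier.
 */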
3202static int binder_ioctl_set_ctx_mgr(struct file *filp)
3203{
3204 int ret = 0;
3205 struct binder_proc *proc = filp->private_data;
3206 struct binder_context *context = proc->context;
3207
3208 kuid_t curr_euid = current_euid();
3209
3210 if (context->binder_context_mgr_node) {
3211 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3212 ret = -EBUSY;
3213 goto out;
3214 }
3215 ret = security_binder_set_context_mgr(proc->tsk);
3216 if (ret < 0)
3217 goto out;
3218 if (uid_valid(context->binder_context_mgr_uid)) {
3219 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
3220 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3221 from_kuid(&init_user_ns, curr_euid),
3222 from_kuid(&init_user_ns,
3223 context->binder_context_mgr_uid));
3224 ret = -EPERM;
3225 goto out;
3226 }
3227 } else {
3228 context->binder_context_mgr_uid = curr_euid;
3229 }
3230 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
3231 if (!context->binder_context_mgr_node) {
3232 ret = -ENOMEM;
3233 goto out;
3234 }
3235 context->binder_context_mgr_node->local_weak_refs++;
3236 context->binder_context_mgr_node->local_strong_refs++;
3237 context->binder_context_mgr_node->has_strong_ref = 1;
3238 context->binder_context_mgr_node->has_weak_ref = 1;
3239out:
3240 return ret;
3241}
3242
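/*
 * binder_ioctl() - top-level ioctl dispatcher, run under the global
 * binder lock. BINDER_WRITE_READ does the bulk of the work; the
 * remaining commands configure the process or the calling thread.
 */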
3243static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3244{
3245 int ret;
3246 struct binder_proc *proc = filp->private_data;
3247 struct binder_thread *thread;
3248 unsigned int size = _IOC_SIZE(cmd);
3249 void __user *ubuf = (void __user *)arg;
3250
3254 trace_binder_ioctl(cmd, arg);
3255
3256 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3257 if (ret)
3258 goto err_unlocked;
3259
3260 binder_lock(__func__);
3261 thread = binder_get_thread(proc);
3262 if (thread == NULL) {
3263 ret = -ENOMEM;
3264 goto err;
3265 }
3266
3267 switch (cmd) {
3268 case BINDER_WRITE_READ:
3269 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3270 if (ret)
3271 goto err;
3272 break;
3273 case BINDER_SET_MAX_THREADS:
3274 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3275 ret = -EINVAL;
3276 goto err;
3277 }
3278 break;
3279 case BINDER_SET_CONTEXT_MGR:
3280 ret = binder_ioctl_set_ctx_mgr(filp);
3281 if (ret)
3282 goto err;
3283 break;
3284 case BINDER_THREAD_EXIT:
3285 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
3286 proc->pid, thread->pid);
3287 binder_free_thread(proc, thread);
3288 thread = NULL;
3289 break;
3290 case BINDER_VERSION: {
3291 struct binder_version __user *ver = ubuf;
3292
3293 if (size != sizeof(struct binder_version)) {
3294 ret = -EINVAL;
3295 goto err;
3296 }
3297 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3298 &ver->protocol_version)) {
3299 ret = -EINVAL;
3300 goto err;
3301 }
3302 break;
3303 }
3304 default:
3305 ret = -EINVAL;
3306 goto err;
3307 }
3308 ret = 0;
3309err:
3310 if (thread)
3311 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
3312 binder_unlock(__func__);
3313 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3314 if (ret && ret != -ERESTARTSYS)
3315 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
3316err_unlocked:
3317 trace_binder_ioctl_done(ret);
3318 return ret;
3319}
3320
3321static void binder_vma_open(struct vm_area_struct *vma)
3322{
3323 struct binder_proc *proc = vma->vm_private_data;
3324
3325 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3326 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3327 proc->pid, vma->vm_start, vma->vm_end,
3328 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3329 (unsigned long)pgprot_val(vma->vm_page_prot));
3330}
3331
3332static void binder_vma_close(struct vm_area_struct *vma)
3333{
3334 struct binder_proc *proc = vma->vm_private_data;
3335
3336 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3337 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3338 proc->pid, vma->vm_start, vma->vm_end,
3339 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3340 (unsigned long)pgprot_val(vma->vm_page_prot));
3341 proc->vma = NULL;
3342 proc->vma_vm_mm = NULL;
3343 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3344}
3345
3346static int binder_vm_fault(struct vm_fault *vmf)
3347{
3348 return VM_FAULT_SIGBUS;
3349}
3350
3351static const struct vm_operations_struct binder_vm_ops = {
3352 .open = binder_vma_open,
3353 .close = binder_vma_close,
3354 .fault = binder_vm_fault,
3355};
3356
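/*
 * binder_mmap() - set up the shared buffer area (capped at 4MB): a
 * kernel virtual range of the same size is reserved with
 * get_vm_area(), the page-pointer array is allocated, and the first
 * page is mapped so the initial free buffer can be inserted. The
 * mapping is forced read-only for userspace; the kernel copies
 * transaction data into it on the sender's behalf.
 */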
3357static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3358{
3359 int ret;
3360 struct vm_struct *area;
3361 struct binder_proc *proc = filp->private_data;
3362 const char *failure_string;
3363 struct binder_buffer *buffer;
3364
3365 if (proc->tsk != current->group_leader)
3366 return -EINVAL;
3367
3368 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3369 vma->vm_end = vma->vm_start + SZ_4M;
3370
3371 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3372 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3373 proc->pid, vma->vm_start, vma->vm_end,
3374 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3375 (unsigned long)pgprot_val(vma->vm_page_prot));
3376
3377 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3378 ret = -EPERM;
3379 failure_string = "bad vm_flags";
3380 goto err_bad_arg;
3381 }
3382 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3383
3384 mutex_lock(&binder_mmap_lock);
3385 if (proc->buffer) {
3386 ret = -EBUSY;
3387 failure_string = "already mapped";
3388 goto err_already_mapped;
3389 }
3390
3391 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
3392 if (area == NULL) {
3393 ret = -ENOMEM;
3394 failure_string = "get_vm_area";
3395 goto err_get_vm_area_failed;
3396 }
3397 proc->buffer = area->addr;
3398 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
3399 mutex_unlock(&binder_mmap_lock);
3400
3401#ifdef CONFIG_CPU_CACHE_VIPT
3402 if (cache_is_vipt_aliasing()) {
3403 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
3404 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
3405 vma->vm_start += PAGE_SIZE;
3406 }
3407 }
3408#endif
	proc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			      sizeof(proc->pages[0]), GFP_KERNEL);
3410 if (proc->pages == NULL) {
3411 ret = -ENOMEM;
3412 failure_string = "alloc page array";
3413 goto err_alloc_pages_failed;
3414 }
3415 proc->buffer_size = vma->vm_end - vma->vm_start;
3416
3417 vma->vm_ops = &binder_vm_ops;
3418 vma->vm_private_data = proc;
3419
3420 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
3421 ret = -ENOMEM;
3422 failure_string = "alloc small buf";
3423 goto err_alloc_small_buf_failed;
3424 }
3425 buffer = proc->buffer;
3426 INIT_LIST_HEAD(&proc->buffers);
3427 list_add(&buffer->entry, &proc->buffers);
3428 buffer->free = 1;
3429 binder_insert_free_buffer(proc, buffer);
3430 proc->free_async_space = proc->buffer_size / 2;
3431 barrier();
3432 proc->files = get_files_struct(current);
3433 proc->vma = vma;
3434 proc->vma_vm_mm = vma->vm_mm;
3435
3438 return 0;
3439
3440err_alloc_small_buf_failed:
3441 kfree(proc->pages);
3442 proc->pages = NULL;
3443err_alloc_pages_failed:
3444 mutex_lock(&binder_mmap_lock);
3445 vfree(proc->buffer);
3446 proc->buffer = NULL;
3447err_get_vm_area_failed:
3448err_already_mapped:
3449 mutex_unlock(&binder_mmap_lock);
3450err_bad_arg:
3451 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3452 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3453 return ret;
3454}
3455
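/*
 * binder_open() - allocate and initialize a binder_proc for the
 * opening process, attach it to the device's context, add it to the
 * global process list and create its debugfs entry.
 */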
3456static int binder_open(struct inode *nodp, struct file *filp)
3457{
3458 struct binder_proc *proc;
3459 struct binder_device *binder_dev;
3460
3461 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3462 current->group_leader->pid, current->pid);
3463
3464 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3465 if (proc == NULL)
3466 return -ENOMEM;
3467 get_task_struct(current->group_leader);
3468 proc->tsk = current->group_leader;
3469 INIT_LIST_HEAD(&proc->todo);
3470 init_waitqueue_head(&proc->wait);
3471 proc->default_priority = task_nice(current);
3472 binder_dev = container_of(filp->private_data, struct binder_device,
3473 miscdev);
3474 proc->context = &binder_dev->context;
3475
3476 binder_lock(__func__);
3477
3478 binder_stats_created(BINDER_STAT_PROC);
3479 hlist_add_head(&proc->proc_node, &binder_procs);
3480 proc->pid = current->group_leader->pid;
3481 INIT_LIST_HEAD(&proc->delivered_death);
3482 filp->private_data = proc;
3483
3484 binder_unlock(__func__);
3485
3486 if (binder_debugfs_dir_entry_proc) {
3487 char strbuf[11];
3488
3489 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so this
		 * will fail if the process tries to open the driver again
		 * with a different context; the debugfs printing code shows
		 * all contexts for a given PID anyway, so this is not a
		 * real problem.
		 */
3497 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3498 binder_debugfs_dir_entry_proc,
3499 (void *)(unsigned long)proc->pid,
3500 &binder_proc_fops);
3501 }
3502
3503 return 0;
3504}
3505
3506static int binder_flush(struct file *filp, fl_owner_t id)
3507{
3508 struct binder_proc *proc = filp->private_data;
3509
3510 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3511
3512 return 0;
3513}
3514
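/*
 * binder_deferred_flush() - force every thread of the process that is
 * blocked in binder_thread_read() back to userspace; run from the
 * deferred workqueue on flush (e.g. when the fd is closed).
 */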
3515static void binder_deferred_flush(struct binder_proc *proc)
3516{
3517 struct rb_node *n;
3518 int wake_count = 0;
3519
3520 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3521 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3522
3523 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3524 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3525 wake_up_interruptible(&thread->wait);
3526 wake_count++;
3527 }
3528 }
3529 wake_up_interruptible_all(&proc->wait);
3530
3531 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3532 "binder_flush: %d woke %d threads\n", proc->pid,
3533 wake_count);
3534}
3535
3536static int binder_release(struct inode *nodp, struct file *filp)
3537{
3538 struct binder_proc *proc = filp->private_data;
3539
3540 debugfs_remove(proc->debugfs_entry);
3541 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3542
3543 return 0;
3544}
3545
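/*
 * binder_node_release() - detach a node from its dying process. A node
 * with no remote references is freed immediately; otherwise it is
 * moved to the global dead-nodes list and a death notification is
 * queued for every reference that requested one. Returns the updated
 * incoming reference count used by the release statistics.
 */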
3546static int binder_node_release(struct binder_node *node, int refs)
3547{
3548 struct binder_ref *ref;
3549 int death = 0;
3550
3551 list_del_init(&node->work.entry);
3552 binder_release_work(&node->async_todo);
3553
3554 if (hlist_empty(&node->refs)) {
3555 kfree(node);
3556 binder_stats_deleted(BINDER_STAT_NODE);
3557
3558 return refs;
3559 }
3560
3561 node->proc = NULL;
3562 node->local_strong_refs = 0;
3563 node->local_weak_refs = 0;
3564 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3565
3566 hlist_for_each_entry(ref, &node->refs, node_entry) {
3567 refs++;
3568
3569 if (!ref->death)
3570 continue;
3571
3572 death++;
3573
3574 if (list_empty(&ref->death->work.entry)) {
3575 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3576 list_add_tail(&ref->death->work.entry,
3577 &ref->proc->todo);
3578 wake_up_interruptible(&ref->proc->wait);
3579 } else
3580 BUG();
3581 }
3582
3583 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3584 "node %d now dead, refs %d, death %d\n",
3585 node->debug_id, refs, death);
3586
3587 return refs;
3588}
3589
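/*
 * binder_deferred_release() - final teardown of a binder_proc: frees
 * its threads, nodes, references, undelivered work and leftover
 * buffers, unmaps and frees any remaining pages, drops the task
 * reference and finally frees the proc itself.
 */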
3590static void binder_deferred_release(struct binder_proc *proc)
3591{
3592 struct binder_transaction *t;
3593 struct binder_context *context = proc->context;
3594 struct rb_node *n;
3595 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3596 active_transactions, page_count;
3597
3598 BUG_ON(proc->vma);
3599 BUG_ON(proc->files);
3600
3601 hlist_del(&proc->proc_node);
3602
3603 if (context->binder_context_mgr_node &&
3604 context->binder_context_mgr_node->proc == proc) {
3605 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3606 "%s: %d context_mgr_node gone\n",
3607 __func__, proc->pid);
3608 context->binder_context_mgr_node = NULL;
3609 }
3610
3611 threads = 0;
3612 active_transactions = 0;
3613 while ((n = rb_first(&proc->threads))) {
3614 struct binder_thread *thread;
3615
3616 thread = rb_entry(n, struct binder_thread, rb_node);
3617 threads++;
3618 active_transactions += binder_free_thread(proc, thread);
3619 }
3620
3621 nodes = 0;
3622 incoming_refs = 0;
3623 while ((n = rb_first(&proc->nodes))) {
3624 struct binder_node *node;
3625
3626 node = rb_entry(n, struct binder_node, rb_node);
3627 nodes++;
3628 rb_erase(&node->rb_node, &proc->nodes);
3629 incoming_refs = binder_node_release(node, incoming_refs);
3630 }
3631
3632 outgoing_refs = 0;
3633 while ((n = rb_first(&proc->refs_by_desc))) {
3634 struct binder_ref *ref;
3635
3636 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3637 outgoing_refs++;
3638 binder_delete_ref(ref);
3639 }
3640
3641 binder_release_work(&proc->todo);
3642 binder_release_work(&proc->delivered_death);
3643
3644 buffers = 0;
3645 while ((n = rb_first(&proc->allocated_buffers))) {
3646 struct binder_buffer *buffer;
3647
3648 buffer = rb_entry(n, struct binder_buffer, rb_node);
3649
3650 t = buffer->transaction;
3651 if (t) {
3652 t->buffer = NULL;
3653 buffer->transaction = NULL;
3654 pr_err("release proc %d, transaction %d, not freed\n",
3655 proc->pid, t->debug_id);
3657 }
3658
3659 binder_free_buf(proc, buffer);
3660 buffers++;
3661 }
3662
3663 binder_stats_deleted(BINDER_STAT_PROC);
3664
3665 page_count = 0;
3666 if (proc->pages) {
3667 int i;
3668
3669 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3670 void *page_addr;
3671
3672 if (!proc->pages[i])
3673 continue;
3674
3675 page_addr = proc->buffer + i * PAGE_SIZE;
3676 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3677 "%s: %d: page %d at %p not freed\n",
3678 __func__, proc->pid, i, page_addr);
3679 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3680 __free_page(proc->pages[i]);
3681 page_count++;
3682 }
3683 kfree(proc->pages);
3684 vfree(proc->buffer);
3685 }
3686
3687 put_task_struct(proc->tsk);
3688
3689 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3690 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3691 __func__, proc->pid, threads, nodes, incoming_refs,
3692 outgoing_refs, active_transactions, buffers, page_count);
3693
3694 kfree(proc);
3695}
3696
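/*
 * binder_deferred_func() - workqueue handler that drains the deferred
 * work list, performing the flush and release work for one proc per
 * iteration under the global binder lock (the files_struct, if any, is
 * put after the lock is dropped).
 */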
3697static void binder_deferred_func(struct work_struct *work)
3698{
3699 struct binder_proc *proc;
3700 struct files_struct *files;
3701
3702 int defer;
3703
3704 do {
3705 binder_lock(__func__);
3706 mutex_lock(&binder_deferred_lock);
3707 if (!hlist_empty(&binder_deferred_list)) {
3708 proc = hlist_entry(binder_deferred_list.first,
3709 struct binder_proc, deferred_work_node);
3710 hlist_del_init(&proc->deferred_work_node);
3711 defer = proc->deferred_work;
3712 proc->deferred_work = 0;
3713 } else {
3714 proc = NULL;
3715 defer = 0;
3716 }
3717 mutex_unlock(&binder_deferred_lock);
3718
3719 files = NULL;
3720 if (defer & BINDER_DEFERRED_PUT_FILES) {
3721 files = proc->files;
3722 if (files)
3723 proc->files = NULL;
3724 }
3725
3726 if (defer & BINDER_DEFERRED_FLUSH)
3727 binder_deferred_flush(proc);
3728
3729 if (defer & BINDER_DEFERRED_RELEASE)
3730 binder_deferred_release(proc);
3731
3732 binder_unlock(__func__);
3733 if (files)
3734 put_files_struct(files);
3735 } while (proc);
3736}
3737static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3738
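/*
 * binder_defer_work() - record deferred work bits for the proc and
 * schedule the deferred workqueue if the proc is not already queued.
 */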
3739static void
3740binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3741{
3742 mutex_lock(&binder_deferred_lock);
3743 proc->deferred_work |= defer;
3744 if (hlist_unhashed(&proc->deferred_work_node)) {
3745 hlist_add_head(&proc->deferred_work_node,
3746 &binder_deferred_list);
3747 schedule_work(&binder_deferred_work);
3748 }
3749 mutex_unlock(&binder_deferred_lock);
3750}
3751
3752static void print_binder_transaction(struct seq_file *m, const char *prefix,
3753 struct binder_transaction *t)
3754{
3755 seq_printf(m,
3756 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3757 prefix, t->debug_id, t,
3758 t->from ? t->from->proc->pid : 0,
3759 t->from ? t->from->pid : 0,
3760 t->to_proc ? t->to_proc->pid : 0,
3761 t->to_thread ? t->to_thread->pid : 0,
3762 t->code, t->flags, t->priority, t->need_reply);
3763 if (t->buffer == NULL) {
3764 seq_puts(m, " buffer free\n");
3765 return;
3766 }
3767 if (t->buffer->target_node)
3768 seq_printf(m, " node %d",
3769 t->buffer->target_node->debug_id);
3770 seq_printf(m, " size %zd:%zd data %p\n",
3771 t->buffer->data_size, t->buffer->offsets_size,
3772 t->buffer->data);
3773}
3774
3775static void print_binder_buffer(struct seq_file *m, const char *prefix,
3776 struct binder_buffer *buffer)
3777{
3778 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3779 prefix, buffer->debug_id, buffer->data,
3780 buffer->data_size, buffer->offsets_size,
3781 buffer->transaction ? "active" : "delivered");
3782}
3783
3784static void print_binder_work(struct seq_file *m, const char *prefix,
3785 const char *transaction_prefix,
3786 struct binder_work *w)
3787{
3788 struct binder_node *node;
3789 struct binder_transaction *t;
3790
3791 switch (w->type) {
3792 case BINDER_WORK_TRANSACTION:
3793 t = container_of(w, struct binder_transaction, work);
3794 print_binder_transaction(m, transaction_prefix, t);
3795 break;
3796 case BINDER_WORK_TRANSACTION_COMPLETE:
3797 seq_printf(m, "%stransaction complete\n", prefix);
3798 break;
3799 case BINDER_WORK_NODE:
3800 node = container_of(w, struct binder_node, work);
3801 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3802 prefix, node->debug_id,
3803 (u64)node->ptr, (u64)node->cookie);
3804 break;
3805 case BINDER_WORK_DEAD_BINDER:
3806 seq_printf(m, "%shas dead binder\n", prefix);
3807 break;
3808 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3809 seq_printf(m, "%shas cleared dead binder\n", prefix);
3810 break;
3811 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3812 seq_printf(m, "%shas cleared death notification\n", prefix);
3813 break;
3814 default:
3815 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3816 break;
3817 }
3818}
3819
3820static void print_binder_thread(struct seq_file *m,
3821 struct binder_thread *thread,
3822 int print_always)
3823{
3824 struct binder_transaction *t;
3825 struct binder_work *w;
3826 size_t start_pos = m->count;
3827 size_t header_pos;
3828
3829 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3830 header_pos = m->count;
3831 t = thread->transaction_stack;
3832 while (t) {
3833 if (t->from == thread) {
3834 print_binder_transaction(m,
3835 " outgoing transaction", t);
3836 t = t->from_parent;
3837 } else if (t->to_thread == thread) {
3838 print_binder_transaction(m,
3839 " incoming transaction", t);
3840 t = t->to_parent;
3841 } else {
3842 print_binder_transaction(m, " bad transaction", t);
3843 t = NULL;
3844 }
3845 }
3846 list_for_each_entry(w, &thread->todo, entry) {
3847 print_binder_work(m, " ", " pending transaction", w);
3848 }
3849 if (!print_always && m->count == header_pos)
3850 m->count = start_pos;
3851}
3852
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}

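/*
 * Dump one reference: descriptor, target node (flagged "dead" when the
 * owning process is gone), strong/weak counts, and any registered death
 * notification.
 */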
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}

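/*
 * Dump a process: all of its threads, its nodes (only those with pending
 * async transactions unless print_all is set), refs, allocated buffers,
 * and pending work.  Like print_binder_thread(), the output is rewound
 * when nothing beyond the header would have been printed.
 */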
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

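/*
 * These tables are indexed by the _IOC_NR() of the corresponding BC_/BR_
 * commands, so the entries must stay in the same order as the enums in
 * uapi/linux/android/binder.h.  The BUILD_BUG_ON()s in print_binder_stats()
 * catch size mismatches but not reordering.
 */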
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

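/*
 * Dump the non-zero command, return, and object counters.  For objects,
 * "active" is created minus deleted and "total" is the creation count.
 */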
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}

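/*
 * Dump per-process summary counts: threads, nodes, refs, buffers, and
 * pending transactions, followed by that process's command/return stats.
 */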
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

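/*
 * Implements the "state" debugfs file: dead nodes first, then a full
 * dump of every process.  The global binder lock is taken unless the
 * proc_no_lock module parameter is set.
 */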
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

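/* Implements the "stats" debugfs file: global stats, then per-process. */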
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

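/*
 * Implements the "transactions" debugfs file: like "state", but with
 * print_all == 0 so only processes and threads with transactions in
 * flight appear in the output.
 */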
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

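/*
 * Implements the per-process files under the debugfs "proc" directory.
 * The pid to dump arrives in m->private, stashed there from
 * inode->i_private by BINDER_DEBUG_ENTRY()'s single_open() call.
 */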
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

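/*
 * Format one transaction log record.  call_type is encoded as
 * 0 = synchronous call, 1 = async (one-way), 2 = reply.
 */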
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
}

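/*
 * The log is a fixed-size ring buffer: once it has wrapped (log->full),
 * print the entries after the write cursor first so the output comes
 * out in chronological order.
 */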
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}

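/*
 * binder_ioctl() doubles as the compat handler: the binder uapi uses
 * fixed-size types, so no translation is needed for 32-bit callers.
 */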
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

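/* Instantiate the file_operations used by the debugfs files below. */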
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

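/*
 * Allocate and register one binder misc device (e.g. /dev/binder) and
 * add it to the global binder_devices list.  Each device carries its
 * own context, with no context manager set yet.
 */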
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

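/*
 * Module init: create the debugfs hierarchy, then register one misc
 * device per name in the comma-separated "devices" module parameter.
 */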
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

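	/*
	 * Copy the module parameter string, because we don't want to
	 * tokenize it in-place with strsep() below.
	 */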
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");