// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @_proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

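/*
 * binder_dequeue_work_head_ilocked() - dequeue the first item on a work list
 *
 * Removes and returns the first binder_work on @list, or NULL if the
 * list is empty. Requires the owning proc->inner_lock to be held.
 */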
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

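/*
 * binder_has_work_ilocked() - check whether @thread has pending work:
 * thread work, a pending looper return, or (if @do_proc_work) process
 * work. Requires thread->proc->inner_lock to be held.
 */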
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

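/*
 * Wake up every polling thread that is available for process work.
 * Used when no single thread could be selected to take the work.
 * Requires proc->inner_lock to be held.
 */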
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

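/*
 * Set the nice value of the current task to @nice, clamping to the
 * RLIMIT_NICE ceiling when the requested value is not permitted.
 */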
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

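/*
 * Look up the node for @ptr in @proc's node rbtree. On success an
 * implicit tmp reference is taken on the returned node; the caller
 * releases it via binder_put_node(). Requires proc->inner_lock.
 */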
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

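/*
 * Initialize @new_node from @fp and insert it into @proc's node rbtree.
 * If another thread raced and already inserted a node for the same ptr,
 * the existing node is returned instead and the caller must free
 * @new_node. Requires proc->inner_lock.
 */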
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

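/*
 * Increment a strong/weak, internal/local reference on @node, queuing
 * node work on @target_list when userspace must be informed of the new
 * strong/weak state. Requires node->lock and proc->inner_lock.
 */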
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

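/*
 * Drop a strong/weak reference on @node. Returns true when the node has
 * no references left and the caller must free it via binder_free_node().
 * Requires node->lock and proc->inner_lock.
 */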
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

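/*
 * Look up the ref for handle @desc in @proc's refs_by_desc rbtree. When
 * @need_strong_ref is true, the ref must hold a strong reference or NULL
 * is returned. Requires proc->outer_lock.
 */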
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

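/*
 * Pop @t off @target_thread's transaction stack and sever its "from"
 * link. Requires the target thread's proc->inner_lock to be held.
 */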
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here in normal operation: the
				 * reply error was already set, so just warn
				 * and drop the duplicate error.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index
 *		@index of the offset array, that object is returned.
 *		Otherwise, %NULL is returned. If @object_offsetp is
 *		non-NULL, it is set to the offset of the validated object.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No objects verified yet, so nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual file close.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}

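/*
 * Release the references taken while translating the objects embedded in
 * @buffer. If the transaction failed partway through processing
 * (@is_failure), only the objects up to @failed_at are released.
 */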
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the start of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

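/*
 * Translate a BINDER_TYPE_(WEAK_)BINDER object into a handle in the
 * target process: look up (or create) the node in the sender, check the
 * transfer against security policy, take a reference for the target and
 * rewrite the flat_binder_object in place.
 */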
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

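/*
 * Validate an fd transfer to the target process and queue a fixup that
 * installs a new fd in the target when the transaction is received; the
 * fd allocation itself must run in the context of a target thread.
 */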
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset	offset in target buffer to fixup
 * @skip_size	bytes to skip in copy (fixup will be written later)
 * @fixup_data	data to write at fixup offset
 * @node	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset		offset in target buffer
 * @sender_uaddr	user address in source buffer
 * @length		bytes to copy
 * @node		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed locally. No locking is necessary.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * Copy the sender data up to the next fixup, or to
			 * the end of this sg entry if no fixup is due.
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	BUG_ON(!list_empty(pf_head));
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}

static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the start of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
				fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}

2539static int binder_fixup_parent(struct list_head *pf_head,
2540 struct binder_transaction *t,
2541 struct binder_thread *thread,
2542 struct binder_buffer_object *bp,
2543 binder_size_t off_start_offset,
2544 binder_size_t num_valid,
2545 binder_size_t last_fixup_obj_off,
2546 binder_size_t last_fixup_min_off)
2547{
2548 struct binder_buffer_object *parent;
2549 struct binder_buffer *b = t->buffer;
2550 struct binder_proc *proc = thread->proc;
2551 struct binder_proc *target_proc = t->to_proc;
2552 struct binder_object object;
2553 binder_size_t buffer_offset;
2554 binder_size_t parent_offset;
2555
2556 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2557 return 0;
2558
2559 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2560 off_start_offset, &parent_offset,
2561 num_valid);
2562 if (!parent) {
2563 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2564 proc->pid, thread->pid);
2565 return -EINVAL;
2566 }
2567
2568 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2569 parent_offset, bp->parent_offset,
2570 last_fixup_obj_off,
2571 last_fixup_min_off)) {
2572 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2573 proc->pid, thread->pid);
2574 return -EINVAL;
2575 }
2576
2577 if (parent->length < sizeof(binder_uintptr_t) ||
2578 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
2580 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2581 proc->pid, thread->pid);
2582 return -EINVAL;
2583 }
2584 buffer_offset = bp->parent_offset +
2585 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2586 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2587}
2588
2589
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the thread and that specific thread is woken up.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen
 */
2607static int binder_proc_transaction(struct binder_transaction *t,
2608 struct binder_proc *proc,
2609 struct binder_thread *thread)
2610{
2611 struct binder_node *node = t->buffer->target_node;
2612 bool oneway = !!(t->flags & TF_ONE_WAY);
2613 bool pending_async = false;
2614
2615 BUG_ON(!node);
2616 binder_node_lock(node);
2617 if (oneway) {
2618 BUG_ON(thread);
2619 if (node->has_async_transaction)
2620 pending_async = true;
2621 else
2622 node->has_async_transaction = true;
2623 }
2624
2625 binder_inner_proc_lock(proc);
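	/*
	 * Note deliveries attempted while the target is frozen; user
	 * space can query these flags via BINDER_GET_FROZEN_INFO.
	 */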
2626 if (proc->is_frozen) {
2627 proc->sync_recv |= !oneway;
2628 proc->async_recv |= oneway;
2629 }
2630
2631 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2632 (thread && thread->is_dead)) {
2633 binder_inner_proc_unlock(proc);
2634 binder_node_unlock(node);
2635 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2636 }
2637
2638 if (!thread && !pending_async)
2639 thread = binder_select_thread_ilocked(proc);
2640
2641 if (thread)
2642 binder_enqueue_thread_work_ilocked(thread, &t->work);
2643 else if (!pending_async)
2644 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2645 else
2646 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2647
2648 if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2650
2651 proc->outstanding_txns++;
2652 binder_inner_proc_unlock(proc);
2653 binder_node_unlock(node);
2654
2655 return 0;
2656}
2657
2658
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL
 * (in which case @error is set to BR_DEAD_REPLY). Also sets @procp if valid.
 */
2679static struct binder_node *binder_get_node_refs_for_txn(
2680 struct binder_node *node,
2681 struct binder_proc **procp,
2682 uint32_t *error)
2683{
2684 struct binder_node *target_node = NULL;
2685
2686 binder_node_inner_lock(node);
2687 if (node->proc) {
2688 target_node = node;
2689 binder_inc_node_nilocked(node, 1, 0, NULL);
2690 binder_inc_node_tmpref_ilocked(node);
2691 node->proc->tmp_ref++;
2692 *procp = node->proc;
2693 } else
2694 *error = BR_DEAD_REPLY;
2695 binder_node_inner_unlock(node);
2696
2697 return target_node;
2698}
2699
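/*
 * Handle a BC_TRANSACTION/BC_REPLY (or their _SG variants) from user
 * space: resolve the target, allocate and fill the target-side buffer,
 * translate embedded objects, then queue the work and wake the target.
 */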
2700static void binder_transaction(struct binder_proc *proc,
2701 struct binder_thread *thread,
2702 struct binder_transaction_data *tr, int reply,
2703 binder_size_t extra_buffers_size)
2704{
2705 int ret;
2706 struct binder_transaction *t;
2707 struct binder_work *w;
2708 struct binder_work *tcomplete;
2709 binder_size_t buffer_offset = 0;
2710 binder_size_t off_start_offset, off_end_offset;
2711 binder_size_t off_min;
2712 binder_size_t sg_buf_offset, sg_buf_end_offset;
2713 binder_size_t user_offset = 0;
2714 struct binder_proc *target_proc = NULL;
2715 struct binder_thread *target_thread = NULL;
2716 struct binder_node *target_node = NULL;
2717 struct binder_transaction *in_reply_to = NULL;
2718 struct binder_transaction_log_entry *e;
2719 uint32_t return_error = 0;
2720 uint32_t return_error_param = 0;
2721 uint32_t return_error_line = 0;
2722 binder_size_t last_fixup_obj_off = 0;
2723 binder_size_t last_fixup_min_off = 0;
2724 struct binder_context *context = proc->context;
2725 int t_debug_id = atomic_inc_return(&binder_last_id);
2726 char *secctx = NULL;
2727 u32 secctx_sz = 0;
2728 struct list_head sgc_head;
2729 struct list_head pf_head;
2730 const void __user *user_buffer = (const void __user *)
2731 (uintptr_t)tr->data.ptr.buffer;
2732 INIT_LIST_HEAD(&sgc_head);
2733 INIT_LIST_HEAD(&pf_head);
2734
2735 e = binder_transaction_log_add(&binder_transaction_log);
2736 e->debug_id = t_debug_id;
2737 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2738 e->from_proc = proc->pid;
2739 e->from_thread = thread->pid;
2740 e->target_handle = tr->target.handle;
2741 e->data_size = tr->data_size;
2742 e->offsets_size = tr->offsets_size;
2743 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2744
2745 if (reply) {
2746 binder_inner_proc_lock(proc);
2747 in_reply_to = thread->transaction_stack;
2748 if (in_reply_to == NULL) {
2749 binder_inner_proc_unlock(proc);
2750 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2751 proc->pid, thread->pid);
2752 return_error = BR_FAILED_REPLY;
2753 return_error_param = -EPROTO;
2754 return_error_line = __LINE__;
2755 goto err_empty_call_stack;
2756 }
2757 if (in_reply_to->to_thread != thread) {
2758 spin_lock(&in_reply_to->lock);
2759 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2760 proc->pid, thread->pid, in_reply_to->debug_id,
2761 in_reply_to->to_proc ?
2762 in_reply_to->to_proc->pid : 0,
2763 in_reply_to->to_thread ?
2764 in_reply_to->to_thread->pid : 0);
2765 spin_unlock(&in_reply_to->lock);
2766 binder_inner_proc_unlock(proc);
2767 return_error = BR_FAILED_REPLY;
2768 return_error_param = -EPROTO;
2769 return_error_line = __LINE__;
2770 in_reply_to = NULL;
2771 goto err_bad_call_stack;
2772 }
2773 thread->transaction_stack = in_reply_to->to_parent;
2774 binder_inner_proc_unlock(proc);
2775 binder_set_nice(in_reply_to->saved_priority);
2776 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2777 if (target_thread == NULL) {
			/* annotation for sparse */
2779 __release(&target_thread->proc->inner_lock);
2780 return_error = BR_DEAD_REPLY;
2781 return_error_line = __LINE__;
2782 goto err_dead_binder;
2783 }
2784 if (target_thread->transaction_stack != in_reply_to) {
2785 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2786 proc->pid, thread->pid,
2787 target_thread->transaction_stack ?
2788 target_thread->transaction_stack->debug_id : 0,
2789 in_reply_to->debug_id);
2790 binder_inner_proc_unlock(target_thread->proc);
2791 return_error = BR_FAILED_REPLY;
2792 return_error_param = -EPROTO;
2793 return_error_line = __LINE__;
2794 in_reply_to = NULL;
2795 target_thread = NULL;
2796 goto err_dead_binder;
2797 }
2798 target_proc = target_thread->proc;
2799 target_proc->tmp_ref++;
2800 binder_inner_proc_unlock(target_thread->proc);
2801 } else {
2802 if (tr->target.handle) {
2803 struct binder_ref *ref;
2804
			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
2812 binder_proc_lock(proc);
2813 ref = binder_get_ref_olocked(proc, tr->target.handle,
2814 true);
2815 if (ref) {
2816 target_node = binder_get_node_refs_for_txn(
2817 ref->node, &target_proc,
2818 &return_error);
2819 } else {
2820 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2821 proc->pid, thread->pid, tr->target.handle);
2822 return_error = BR_FAILED_REPLY;
2823 }
2824 binder_proc_unlock(proc);
2825 } else {
2826 mutex_lock(&context->context_mgr_node_lock);
2827 target_node = context->binder_context_mgr_node;
2828 if (target_node)
2829 target_node = binder_get_node_refs_for_txn(
2830 target_node, &target_proc,
2831 &return_error);
2832 else
2833 return_error = BR_DEAD_REPLY;
2834 mutex_unlock(&context->context_mgr_node_lock);
2835 if (target_node && target_proc->pid == proc->pid) {
2836 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2837 proc->pid, thread->pid);
2838 return_error = BR_FAILED_REPLY;
2839 return_error_param = -EINVAL;
2840 return_error_line = __LINE__;
2841 goto err_invalid_target_handle;
2842 }
2843 }
2844 if (!target_node) {
			/*
			 * return_error is set above
			 */
2848 return_error_param = -EINVAL;
2849 return_error_line = __LINE__;
2850 goto err_dead_binder;
2851 }
2852 e->to_node = target_node->debug_id;
2853 if (WARN_ON(proc == target_proc)) {
2854 return_error = BR_FAILED_REPLY;
2855 return_error_param = -EINVAL;
2856 return_error_line = __LINE__;
2857 goto err_invalid_target_handle;
2858 }
2859 if (security_binder_transaction(proc->cred,
2860 target_proc->cred) < 0) {
2861 return_error = BR_FAILED_REPLY;
2862 return_error_param = -EPERM;
2863 return_error_line = __LINE__;
2864 goto err_invalid_target_handle;
2865 }
2866 binder_inner_proc_lock(proc);
2867
2868 w = list_first_entry_or_null(&thread->todo,
2869 struct binder_work, entry);
2870 if (!(tr->flags & TF_ONE_WAY) && w &&
2871 w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
2881 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2882 proc->pid, thread->pid);
2883 binder_inner_proc_unlock(proc);
2884 return_error = BR_FAILED_REPLY;
2885 return_error_param = -EPROTO;
2886 return_error_line = __LINE__;
2887 goto err_bad_todo_list;
2888 }
2889
2890 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2891 struct binder_transaction *tmp;
2892
2893 tmp = thread->transaction_stack;
2894 if (tmp->to_thread != thread) {
2895 spin_lock(&tmp->lock);
2896 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2897 proc->pid, thread->pid, tmp->debug_id,
2898 tmp->to_proc ? tmp->to_proc->pid : 0,
2899 tmp->to_thread ?
2900 tmp->to_thread->pid : 0);
2901 spin_unlock(&tmp->lock);
2902 binder_inner_proc_unlock(proc);
2903 return_error = BR_FAILED_REPLY;
2904 return_error_param = -EPROTO;
2905 return_error_line = __LINE__;
2906 goto err_bad_call_stack;
2907 }
2908 while (tmp) {
2909 struct binder_thread *from;
2910
2911 spin_lock(&tmp->lock);
2912 from = tmp->from;
2913 if (from && from->proc == target_proc) {
2914 atomic_inc(&from->tmp_ref);
2915 target_thread = from;
2916 spin_unlock(&tmp->lock);
2917 break;
2918 }
2919 spin_unlock(&tmp->lock);
2920 tmp = tmp->from_parent;
2921 }
2922 }
2923 binder_inner_proc_unlock(proc);
2924 }
2925 if (target_thread)
2926 e->to_thread = target_thread->pid;
2927 e->to_proc = target_proc->pid;
2928
	/* TODO: reuse incoming transaction for reply */
2930 t = kzalloc(sizeof(*t), GFP_KERNEL);
2931 if (t == NULL) {
2932 return_error = BR_FAILED_REPLY;
2933 return_error_param = -ENOMEM;
2934 return_error_line = __LINE__;
2935 goto err_alloc_t_failed;
2936 }
2937 INIT_LIST_HEAD(&t->fd_fixups);
2938 binder_stats_created(BINDER_STAT_TRANSACTION);
2939 spin_lock_init(&t->lock);
2940
2941 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2942 if (tcomplete == NULL) {
2943 return_error = BR_FAILED_REPLY;
2944 return_error_param = -ENOMEM;
2945 return_error_line = __LINE__;
2946 goto err_alloc_tcomplete_failed;
2947 }
2948 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2949
2950 t->debug_id = t_debug_id;
2951
2952 if (reply)
2953 binder_debug(BINDER_DEBUG_TRANSACTION,
2954 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2955 proc->pid, thread->pid, t->debug_id,
2956 target_proc->pid, target_thread->pid,
2957 (u64)tr->data.ptr.buffer,
2958 (u64)tr->data.ptr.offsets,
2959 (u64)tr->data_size, (u64)tr->offsets_size,
2960 (u64)extra_buffers_size);
2961 else
2962 binder_debug(BINDER_DEBUG_TRANSACTION,
2963 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2964 proc->pid, thread->pid, t->debug_id,
2965 target_proc->pid, target_node->debug_id,
2966 (u64)tr->data.ptr.buffer,
2967 (u64)tr->data.ptr.offsets,
2968 (u64)tr->data_size, (u64)tr->offsets_size,
2969 (u64)extra_buffers_size);
2970
2971 if (!reply && !(tr->flags & TF_ONE_WAY))
2972 t->from = thread;
2973 else
2974 t->from = NULL;
2975 t->sender_euid = task_euid(proc->tsk);
2976 t->to_proc = target_proc;
2977 t->to_thread = target_thread;
2978 t->code = tr->code;
2979 t->flags = tr->flags;
2980 t->priority = task_nice(current);
2981
2982 if (target_node && target_node->txn_security_ctx) {
2983 u32 secid;
2984 size_t added_size;
2985
2986 security_cred_getsecid(proc->cred, &secid);
2987 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2988 if (ret) {
2989 return_error = BR_FAILED_REPLY;
2990 return_error_param = ret;
2991 return_error_line = __LINE__;
2992 goto err_get_secctx_failed;
2993 }
2994 added_size = ALIGN(secctx_sz, sizeof(u64));
2995 extra_buffers_size += added_size;
2996 if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
2998 return_error = BR_FAILED_REPLY;
2999 return_error_param = -EINVAL;
3000 return_error_line = __LINE__;
3001 goto err_bad_extra_size;
3002 }
3003 }
3004
3005 trace_binder_transaction(reply, t, target_node);
3006
3007 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3008 tr->offsets_size, extra_buffers_size,
3009 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3010 if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
3014 return_error_param = PTR_ERR(t->buffer);
3015 return_error = return_error_param == -ESRCH ?
3016 BR_DEAD_REPLY : BR_FAILED_REPLY;
3017 return_error_line = __LINE__;
3018 t->buffer = NULL;
3019 goto err_binder_alloc_buf_failed;
3020 }
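	/* The secctx, if present, occupies the tail of the extra buffers area */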
3021 if (secctx) {
3022 int err;
3023 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3024 ALIGN(tr->offsets_size, sizeof(void *)) +
3025 ALIGN(extra_buffers_size, sizeof(void *)) -
3026 ALIGN(secctx_sz, sizeof(u64));
3027
3028 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3029 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3030 t->buffer, buf_offset,
3031 secctx, secctx_sz);
3032 if (err) {
3033 t->security_ctx = 0;
3034 WARN_ON(1);
3035 }
3036 security_release_secctx(secctx, secctx_sz);
3037 secctx = NULL;
3038 }
3039 t->buffer->debug_id = t->debug_id;
3040 t->buffer->transaction = t;
3041 t->buffer->target_node = target_node;
3042 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3043 trace_binder_transaction_alloc_buf(t->buffer);
3044
3045 if (binder_alloc_copy_user_to_buffer(
3046 &target_proc->alloc,
3047 t->buffer,
3048 ALIGN(tr->data_size, sizeof(void *)),
3049 (const void __user *)
3050 (uintptr_t)tr->data.ptr.offsets,
3051 tr->offsets_size)) {
3052 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3053 proc->pid, thread->pid);
3054 return_error = BR_FAILED_REPLY;
3055 return_error_param = -EFAULT;
3056 return_error_line = __LINE__;
3057 goto err_copy_data_failed;
3058 }
3059 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3060 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3061 proc->pid, thread->pid, (u64)tr->offsets_size);
3062 return_error = BR_FAILED_REPLY;
3063 return_error_param = -EINVAL;
3064 return_error_line = __LINE__;
3065 goto err_bad_offset;
3066 }
3067 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3068 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3069 proc->pid, thread->pid,
3070 (u64)extra_buffers_size);
3071 return_error = BR_FAILED_REPLY;
3072 return_error_param = -EINVAL;
3073 return_error_line = __LINE__;
3074 goto err_bad_offset;
3075 }
3076 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3077 buffer_offset = off_start_offset;
3078 off_end_offset = off_start_offset + tr->offsets_size;
3079 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3080 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3081 ALIGN(secctx_sz, sizeof(u64));
3082 off_min = 0;
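	/* Walk the offset array, validating and translating each embedded object */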
3083 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3084 buffer_offset += sizeof(binder_size_t)) {
3085 struct binder_object_header *hdr;
3086 size_t object_size;
3087 struct binder_object object;
3088 binder_size_t object_offset;
3089 binder_size_t copy_size;
3090
3091 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3092 &object_offset,
3093 t->buffer,
3094 buffer_offset,
3095 sizeof(object_offset))) {
3096 return_error = BR_FAILED_REPLY;
3097 return_error_param = -EINVAL;
3098 return_error_line = __LINE__;
3099 goto err_bad_offset;
3100 }
		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
3106 copy_size = object_offset - user_offset;
3107 if (copy_size && (user_offset > object_offset ||
3108 binder_alloc_copy_user_to_buffer(
3109 &target_proc->alloc,
3110 t->buffer, user_offset,
3111 user_buffer + user_offset,
3112 copy_size))) {
3113 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3114 proc->pid, thread->pid);
3115 return_error = BR_FAILED_REPLY;
3116 return_error_param = -EFAULT;
3117 return_error_line = __LINE__;
3118 goto err_copy_data_failed;
3119 }
3120 object_size = binder_get_object(target_proc, user_buffer,
3121 t->buffer, object_offset, &object);
3122 if (object_size == 0 || object_offset < off_min) {
3123 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3124 proc->pid, thread->pid,
3125 (u64)object_offset,
3126 (u64)off_min,
3127 (u64)t->buffer->data_size);
3128 return_error = BR_FAILED_REPLY;
3129 return_error_param = -EINVAL;
3130 return_error_line = __LINE__;
3131 goto err_bad_offset;
3132 }
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
3137 user_offset = object_offset + object_size;
3138
3139 hdr = &object.hdr;
3140 off_min = object_offset + object_size;
3141 switch (hdr->type) {
3142 case BINDER_TYPE_BINDER:
3143 case BINDER_TYPE_WEAK_BINDER: {
3144 struct flat_binder_object *fp;
3145
3146 fp = to_flat_binder_object(hdr);
3147 ret = binder_translate_binder(fp, t, thread);
3148
3149 if (ret < 0 ||
3150 binder_alloc_copy_to_buffer(&target_proc->alloc,
3151 t->buffer,
3152 object_offset,
3153 fp, sizeof(*fp))) {
3154 return_error = BR_FAILED_REPLY;
3155 return_error_param = ret;
3156 return_error_line = __LINE__;
3157 goto err_translate_failed;
3158 }
3159 } break;
3160 case BINDER_TYPE_HANDLE:
3161 case BINDER_TYPE_WEAK_HANDLE: {
3162 struct flat_binder_object *fp;
3163
3164 fp = to_flat_binder_object(hdr);
3165 ret = binder_translate_handle(fp, t, thread);
3166 if (ret < 0 ||
3167 binder_alloc_copy_to_buffer(&target_proc->alloc,
3168 t->buffer,
3169 object_offset,
3170 fp, sizeof(*fp))) {
3171 return_error = BR_FAILED_REPLY;
3172 return_error_param = ret;
3173 return_error_line = __LINE__;
3174 goto err_translate_failed;
3175 }
3176 } break;
3177
3178 case BINDER_TYPE_FD: {
3179 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3180 binder_size_t fd_offset = object_offset +
3181 (uintptr_t)&fp->fd - (uintptr_t)fp;
3182 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3183 thread, in_reply_to);
3184
3185 fp->pad_binder = 0;
3186 if (ret < 0 ||
3187 binder_alloc_copy_to_buffer(&target_proc->alloc,
3188 t->buffer,
3189 object_offset,
3190 fp, sizeof(*fp))) {
3191 return_error = BR_FAILED_REPLY;
3192 return_error_param = ret;
3193 return_error_line = __LINE__;
3194 goto err_translate_failed;
3195 }
3196 } break;
3197 case BINDER_TYPE_FDA: {
3198 struct binder_object ptr_object;
3199 binder_size_t parent_offset;
3200 struct binder_object user_object;
3201 size_t user_parent_size;
3202 struct binder_fd_array_object *fda =
3203 to_binder_fd_array_object(hdr);
3204 size_t num_valid = (buffer_offset - off_start_offset) /
3205 sizeof(binder_size_t);
3206 struct binder_buffer_object *parent =
3207 binder_validate_ptr(target_proc, t->buffer,
3208 &ptr_object, fda->parent,
3209 off_start_offset,
3210 &parent_offset,
3211 num_valid);
3212 if (!parent) {
3213 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3214 proc->pid, thread->pid);
3215 return_error = BR_FAILED_REPLY;
3216 return_error_param = -EINVAL;
3217 return_error_line = __LINE__;
3218 goto err_bad_parent;
3219 }
3220 if (!binder_validate_fixup(target_proc, t->buffer,
3221 off_start_offset,
3222 parent_offset,
3223 fda->parent_offset,
3224 last_fixup_obj_off,
3225 last_fixup_min_off)) {
3226 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3227 proc->pid, thread->pid);
3228 return_error = BR_FAILED_REPLY;
3229 return_error_param = -EINVAL;
3230 return_error_line = __LINE__;
3231 goto err_bad_parent;
3232 }
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
3237 user_parent_size =
3238 binder_get_object(proc, user_buffer, t->buffer,
3239 parent_offset, &user_object);
3240 if (user_parent_size != sizeof(user_object.bbo)) {
3241 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3242 proc->pid, thread->pid,
3243 user_parent_size,
3244 sizeof(user_object.bbo));
3245 return_error = BR_FAILED_REPLY;
3246 return_error_param = -EINVAL;
3247 return_error_line = __LINE__;
3248 goto err_bad_parent;
3249 }
3250 ret = binder_translate_fd_array(&pf_head, fda,
3251 user_buffer, parent,
3252 &user_object.bbo, t,
3253 thread, in_reply_to);
3254 if (!ret)
3255 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3256 t->buffer,
3257 object_offset,
3258 fda, sizeof(*fda));
3259 if (ret) {
3260 return_error = BR_FAILED_REPLY;
3261 return_error_param = ret > 0 ? -EINVAL : ret;
3262 return_error_line = __LINE__;
3263 goto err_translate_failed;
3264 }
3265 last_fixup_obj_off = parent_offset;
3266 last_fixup_min_off =
3267 fda->parent_offset + sizeof(u32) * fda->num_fds;
3268 } break;
3269 case BINDER_TYPE_PTR: {
3270 struct binder_buffer_object *bp =
3271 to_binder_buffer_object(hdr);
3272 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3273 size_t num_valid;
3274
3275 if (bp->length > buf_left) {
3276 binder_user_error("%d:%d got transaction with too large buffer\n",
3277 proc->pid, thread->pid);
3278 return_error = BR_FAILED_REPLY;
3279 return_error_param = -EINVAL;
3280 return_error_line = __LINE__;
3281 goto err_bad_offset;
3282 }
3283 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3284 (const void __user *)(uintptr_t)bp->buffer,
3285 bp->length);
3286 if (ret) {
3287 return_error = BR_FAILED_REPLY;
3288 return_error_param = ret;
3289 return_error_line = __LINE__;
3290 goto err_translate_failed;
3291 }

			/* Fixup buffer pointer to target proc address space */
3293 bp->buffer = (uintptr_t)
3294 t->buffer->user_data + sg_buf_offset;
3295 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3296
3297 num_valid = (buffer_offset - off_start_offset) /
3298 sizeof(binder_size_t);
3299 ret = binder_fixup_parent(&pf_head, t,
3300 thread, bp,
3301 off_start_offset,
3302 num_valid,
3303 last_fixup_obj_off,
3304 last_fixup_min_off);
3305 if (ret < 0 ||
3306 binder_alloc_copy_to_buffer(&target_proc->alloc,
3307 t->buffer,
3308 object_offset,
3309 bp, sizeof(*bp))) {
3310 return_error = BR_FAILED_REPLY;
3311 return_error_param = ret;
3312 return_error_line = __LINE__;
3313 goto err_translate_failed;
3314 }
3315 last_fixup_obj_off = object_offset;
3316 last_fixup_min_off = 0;
3317 } break;
3318 default:
3319 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3320 proc->pid, thread->pid, hdr->type);
3321 return_error = BR_FAILED_REPLY;
3322 return_error_param = -EINVAL;
3323 return_error_line = __LINE__;
3324 goto err_bad_object_type;
3325 }
3326 }

	/* Done processing objects, copy the rest of the buffer */
3328 if (binder_alloc_copy_user_to_buffer(
3329 &target_proc->alloc,
3330 t->buffer, user_offset,
3331 user_buffer + user_offset,
3332 tr->data_size - user_offset)) {
3333 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3334 proc->pid, thread->pid);
3335 return_error = BR_FAILED_REPLY;
3336 return_error_param = -EFAULT;
3337 return_error_line = __LINE__;
3338 goto err_copy_data_failed;
3339 }
3340
3341 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3342 &sgc_head, &pf_head);
3343 if (ret) {
3344 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3345 proc->pid, thread->pid);
3346 return_error = BR_FAILED_REPLY;
3347 return_error_param = ret;
3348 return_error_line = __LINE__;
3349 goto err_copy_data_failed;
3350 }
3351 if (t->buffer->oneway_spam_suspect)
3352 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3353 else
3354 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3355 t->work.type = BINDER_WORK_TRANSACTION;
3356
3357 if (reply) {
3358 binder_enqueue_thread_work(thread, tcomplete);
3359 binder_inner_proc_lock(target_proc);
3360 if (target_thread->is_dead) {
3361 return_error = BR_DEAD_REPLY;
3362 binder_inner_proc_unlock(target_proc);
3363 goto err_dead_proc_or_thread;
3364 }
3365 BUG_ON(t->buffer->async_transaction != 0);
3366 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3367 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3368 target_proc->outstanding_txns++;
3369 binder_inner_proc_unlock(target_proc);
3370 wake_up_interruptible_sync(&target_thread->wait);
3371 binder_free_transaction(in_reply_to);
3372 } else if (!(t->flags & TF_ONE_WAY)) {
3373 BUG_ON(t->buffer->async_transaction != 0);
3374 binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
3382 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3383 t->need_reply = 1;
3384 t->from_parent = thread->transaction_stack;
3385 thread->transaction_stack = t;
3386 binder_inner_proc_unlock(proc);
3387 return_error = binder_proc_transaction(t,
3388 target_proc, target_thread);
3389 if (return_error) {
3390 binder_inner_proc_lock(proc);
3391 binder_pop_transaction_ilocked(thread, t);
3392 binder_inner_proc_unlock(proc);
3393 goto err_dead_proc_or_thread;
3394 }
3395 } else {
3396 BUG_ON(target_node == NULL);
3397 BUG_ON(t->buffer->async_transaction != 1);
3398 binder_enqueue_thread_work(thread, tcomplete);
3399 return_error = binder_proc_transaction(t, target_proc, NULL);
3400 if (return_error)
3401 goto err_dead_proc_or_thread;
3402 }
3403 if (target_thread)
3404 binder_thread_dec_tmpref(target_thread);
3405 binder_proc_dec_tmpref(target_proc);
3406 if (target_node)
3407 binder_dec_node_tmpref(target_node);
3408
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
3412 smp_wmb();
3413 WRITE_ONCE(e->debug_id_done, t_debug_id);
3414 return;
3415
3416err_dead_proc_or_thread:
3417 return_error_line = __LINE__;
3418 binder_dequeue_work(proc, tcomplete);
3419err_translate_failed:
3420err_bad_object_type:
3421err_bad_offset:
3422err_bad_parent:
3423err_copy_data_failed:
3424 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3425 binder_free_txn_fixups(t);
3426 trace_binder_transaction_failed_buffer_release(t->buffer);
3427 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3428 buffer_offset, true);
3429 if (target_node)
3430 binder_dec_node_tmpref(target_node);
3431 target_node = NULL;
3432 t->buffer->transaction = NULL;
3433 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3434err_binder_alloc_buf_failed:
3435err_bad_extra_size:
3436 if (secctx)
3437 security_release_secctx(secctx, secctx_sz);
3438err_get_secctx_failed:
3439 kfree(tcomplete);
3440 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3441err_alloc_tcomplete_failed:
3442 if (trace_binder_txn_latency_free_enabled())
3443 binder_txn_latency_free(t);
3444 kfree(t);
3445 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3446err_alloc_t_failed:
3447err_bad_todo_list:
3448err_bad_call_stack:
3449err_empty_call_stack:
3450err_dead_binder:
3451err_invalid_target_handle:
3452 if (target_thread)
3453 binder_thread_dec_tmpref(target_thread);
3454 if (target_proc)
3455 binder_proc_dec_tmpref(target_proc);
3456 if (target_node) {
3457 binder_dec_node(target_node, 1, 0);
3458 binder_dec_node_tmpref(target_node);
3459 }
3460
3461 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3462 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3463 proc->pid, thread->pid, return_error, return_error_param,
3464 (u64)tr->data_size, (u64)tr->offsets_size,
3465 return_error_line);
3466
3467 {
3468 struct binder_transaction_log_entry *fe;
3469
3470 e->return_error = return_error;
3471 e->return_error_param = return_error_param;
3472 e->return_error_line = return_error_line;
3473 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3474 *fe = *e;
3475
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
3479 smp_wmb();
3480 WRITE_ONCE(e->debug_id_done, t_debug_id);
3481 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3482 }
3483
3484 BUG_ON(thread->return_error.cmd != BR_OK);
3485 if (in_reply_to) {
3486 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3487 binder_enqueue_thread_work(thread, &thread->return_error.work);
3488 binder_send_failed_reply(in_reply_to, return_error);
3489 } else {
3490 thread->return_error.cmd = return_error;
3491 binder_enqueue_thread_work(thread, &thread->return_error.work);
3492 }
3493}
3494
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns the buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup the buffer and free it.
 */
3506static void
3507binder_free_buf(struct binder_proc *proc,
3508 struct binder_thread *thread,
3509 struct binder_buffer *buffer, bool is_failure)
3510{
3511 binder_inner_proc_lock(proc);
3512 if (buffer->transaction) {
3513 buffer->transaction->buffer = NULL;
3514 buffer->transaction = NULL;
3515 }
3516 binder_inner_proc_unlock(proc);
3517 if (buffer->async_transaction && buffer->target_node) {
3518 struct binder_node *buf_node;
3519 struct binder_work *w;
3520
3521 buf_node = buffer->target_node;
3522 binder_node_inner_lock(buf_node);
3523 BUG_ON(!buf_node->has_async_transaction);
3524 BUG_ON(buf_node->proc != proc);
3525 w = binder_dequeue_work_head_ilocked(
3526 &buf_node->async_todo);
3527 if (!w) {
3528 buf_node->has_async_transaction = false;
3529 } else {
3530 binder_enqueue_work_ilocked(
3531 w, &proc->todo);
3532 binder_wakeup_proc_ilocked(proc);
3533 }
3534 binder_node_inner_unlock(buf_node);
3535 }
3536 trace_binder_transaction_buffer_release(buffer);
3537 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3538 binder_alloc_free_buf(&proc->alloc, buffer);
3539}
3540
3541static int binder_thread_write(struct binder_proc *proc,
3542 struct binder_thread *thread,
3543 binder_uintptr_t binder_buffer, size_t size,
3544 binder_size_t *consumed)
3545{
3546 uint32_t cmd;
3547 struct binder_context *context = proc->context;
3548 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3549 void __user *ptr = buffer + *consumed;
3550 void __user *end = buffer + size;
3551
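	/*
	 * Consume user-space commands until the buffer is exhausted or
	 * an error reply is pending for this thread.
	 */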
3552 while (ptr < end && thread->return_error.cmd == BR_OK) {
3553 int ret;
3554
3555 if (get_user(cmd, (uint32_t __user *)ptr))
3556 return -EFAULT;
3557 ptr += sizeof(uint32_t);
3558 trace_binder_command(cmd);
3559 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3560 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3561 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3562 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3563 }
3564 switch (cmd) {
3565 case BC_INCREFS:
3566 case BC_ACQUIRE:
3567 case BC_RELEASE:
3568 case BC_DECREFS: {
3569 uint32_t target;
3570 const char *debug_string;
3571 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3572 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3573 struct binder_ref_data rdata;
3574
3575 if (get_user(target, (uint32_t __user *)ptr))
3576 return -EFAULT;
3577
3578 ptr += sizeof(uint32_t);
3579 ret = -1;
3580 if (increment && !target) {
3581 struct binder_node *ctx_mgr_node;
3582
3583 mutex_lock(&context->context_mgr_node_lock);
3584 ctx_mgr_node = context->binder_context_mgr_node;
3585 if (ctx_mgr_node) {
3586 if (ctx_mgr_node->proc == proc) {
3587 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3588 proc->pid, thread->pid);
3589 mutex_unlock(&context->context_mgr_node_lock);
3590 return -EINVAL;
3591 }
3592 ret = binder_inc_ref_for_node(
3593 proc, ctx_mgr_node,
3594 strong, NULL, &rdata);
3595 }
3596 mutex_unlock(&context->context_mgr_node_lock);
3597 }
3598 if (ret)
3599 ret = binder_update_ref_for_handle(
3600 proc, target, increment, strong,
3601 &rdata);
3602 if (!ret && rdata.desc != target) {
3603 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3604 proc->pid, thread->pid,
3605 target, rdata.desc);
3606 }
3607 switch (cmd) {
3608 case BC_INCREFS:
3609 debug_string = "IncRefs";
3610 break;
3611 case BC_ACQUIRE:
3612 debug_string = "Acquire";
3613 break;
3614 case BC_RELEASE:
3615 debug_string = "Release";
3616 break;
3617 case BC_DECREFS:
3618 default:
3619 debug_string = "DecRefs";
3620 break;
3621 }
3622 if (ret) {
3623 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3624 proc->pid, thread->pid, debug_string,
3625 strong, target, ret);
3626 break;
3627 }
3628 binder_debug(BINDER_DEBUG_USER_REFS,
3629 "%d:%d %s ref %d desc %d s %d w %d\n",
3630 proc->pid, thread->pid, debug_string,
3631 rdata.debug_id, rdata.desc, rdata.strong,
3632 rdata.weak);
3633 break;
3634 }
3635 case BC_INCREFS_DONE:
3636 case BC_ACQUIRE_DONE: {
3637 binder_uintptr_t node_ptr;
3638 binder_uintptr_t cookie;
3639 struct binder_node *node;
3640 bool free_node;
3641
3642 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3643 return -EFAULT;
3644 ptr += sizeof(binder_uintptr_t);
3645 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3646 return -EFAULT;
3647 ptr += sizeof(binder_uintptr_t);
3648 node = binder_get_node(proc, node_ptr);
3649 if (node == NULL) {
3650 binder_user_error("%d:%d %s u%016llx no match\n",
3651 proc->pid, thread->pid,
3652 cmd == BC_INCREFS_DONE ?
3653 "BC_INCREFS_DONE" :
3654 "BC_ACQUIRE_DONE",
3655 (u64)node_ptr);
3656 break;
3657 }
3658 if (cookie != node->cookie) {
3659 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3660 proc->pid, thread->pid,
3661 cmd == BC_INCREFS_DONE ?
3662 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3663 (u64)node_ptr, node->debug_id,
3664 (u64)cookie, (u64)node->cookie);
3665 binder_put_node(node);
3666 break;
3667 }
3668 binder_node_inner_lock(node);
3669 if (cmd == BC_ACQUIRE_DONE) {
3670 if (node->pending_strong_ref == 0) {
3671 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3672 proc->pid, thread->pid,
3673 node->debug_id);
3674 binder_node_inner_unlock(node);
3675 binder_put_node(node);
3676 break;
3677 }
3678 node->pending_strong_ref = 0;
3679 } else {
3680 if (node->pending_weak_ref == 0) {
3681 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3682 proc->pid, thread->pid,
3683 node->debug_id);
3684 binder_node_inner_unlock(node);
3685 binder_put_node(node);
3686 break;
3687 }
3688 node->pending_weak_ref = 0;
3689 }
3690 free_node = binder_dec_node_nilocked(node,
3691 cmd == BC_ACQUIRE_DONE, 0);
3692 WARN_ON(free_node);
3693 binder_debug(BINDER_DEBUG_USER_REFS,
3694 "%d:%d %s node %d ls %d lw %d tr %d\n",
3695 proc->pid, thread->pid,
3696 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3697 node->debug_id, node->local_strong_refs,
3698 node->local_weak_refs, node->tmp_refs);
3699 binder_node_inner_unlock(node);
3700 binder_put_node(node);
3701 break;
3702 }
3703 case BC_ATTEMPT_ACQUIRE:
3704 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3705 return -EINVAL;
3706 case BC_ACQUIRE_RESULT:
3707 pr_err("BC_ACQUIRE_RESULT not supported\n");
3708 return -EINVAL;
3709
3710 case BC_FREE_BUFFER: {
3711 binder_uintptr_t data_ptr;
3712 struct binder_buffer *buffer;
3713
3714 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3715 return -EFAULT;
3716 ptr += sizeof(binder_uintptr_t);
3717
3718 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3719 data_ptr);
3720 if (IS_ERR_OR_NULL(buffer)) {
3721 if (PTR_ERR(buffer) == -EPERM) {
3722 binder_user_error(
3723 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3724 proc->pid, thread->pid,
3725 (u64)data_ptr);
3726 } else {
3727 binder_user_error(
3728 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3729 proc->pid, thread->pid,
3730 (u64)data_ptr);
3731 }
3732 break;
3733 }
3734 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3735 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3736 proc->pid, thread->pid, (u64)data_ptr,
3737 buffer->debug_id,
3738 buffer->transaction ? "active" : "finished");
3739 binder_free_buf(proc, thread, buffer, false);
3740 break;
3741 }
3742
3743 case BC_TRANSACTION_SG:
3744 case BC_REPLY_SG: {
3745 struct binder_transaction_data_sg tr;
3746
3747 if (copy_from_user(&tr, ptr, sizeof(tr)))
3748 return -EFAULT;
3749 ptr += sizeof(tr);
3750 binder_transaction(proc, thread, &tr.transaction_data,
3751 cmd == BC_REPLY_SG, tr.buffers_size);
3752 break;
3753 }
3754 case BC_TRANSACTION:
3755 case BC_REPLY: {
3756 struct binder_transaction_data tr;
3757
3758 if (copy_from_user(&tr, ptr, sizeof(tr)))
3759 return -EFAULT;
3760 ptr += sizeof(tr);
3761 binder_transaction(proc, thread, &tr,
3762 cmd == BC_REPLY, 0);
3763 break;
3764 }
3765
3766 case BC_REGISTER_LOOPER:
3767 binder_debug(BINDER_DEBUG_THREADS,
3768 "%d:%d BC_REGISTER_LOOPER\n",
3769 proc->pid, thread->pid);
3770 binder_inner_proc_lock(proc);
3771 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3772 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3773 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3774 proc->pid, thread->pid);
3775 } else if (proc->requested_threads == 0) {
3776 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3777 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3778 proc->pid, thread->pid);
3779 } else {
3780 proc->requested_threads--;
3781 proc->requested_threads_started++;
3782 }
3783 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3784 binder_inner_proc_unlock(proc);
3785 break;
3786 case BC_ENTER_LOOPER:
3787 binder_debug(BINDER_DEBUG_THREADS,
3788 "%d:%d BC_ENTER_LOOPER\n",
3789 proc->pid, thread->pid);
3790 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3791 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3792 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3793 proc->pid, thread->pid);
3794 }
3795 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3796 break;
3797 case BC_EXIT_LOOPER:
3798 binder_debug(BINDER_DEBUG_THREADS,
3799 "%d:%d BC_EXIT_LOOPER\n",
3800 proc->pid, thread->pid);
3801 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3802 break;
3803
3804 case BC_REQUEST_DEATH_NOTIFICATION:
3805 case BC_CLEAR_DEATH_NOTIFICATION: {
3806 uint32_t target;
3807 binder_uintptr_t cookie;
3808 struct binder_ref *ref;
3809 struct binder_ref_death *death = NULL;
3810
3811 if (get_user(target, (uint32_t __user *)ptr))
3812 return -EFAULT;
3813 ptr += sizeof(uint32_t);
3814 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3815 return -EFAULT;
3816 ptr += sizeof(binder_uintptr_t);
3817 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
3822 death = kzalloc(sizeof(*death), GFP_KERNEL);
3823 if (death == NULL) {
3824 WARN_ON(thread->return_error.cmd !=
3825 BR_OK);
3826 thread->return_error.cmd = BR_ERROR;
3827 binder_enqueue_thread_work(
3828 thread,
3829 &thread->return_error.work);
3830 binder_debug(
3831 BINDER_DEBUG_FAILED_TRANSACTION,
3832 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3833 proc->pid, thread->pid);
3834 break;
3835 }
3836 }
3837 binder_proc_lock(proc);
3838 ref = binder_get_ref_olocked(proc, target, false);
3839 if (ref == NULL) {
3840 binder_user_error("%d:%d %s invalid ref %d\n",
3841 proc->pid, thread->pid,
3842 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3843 "BC_REQUEST_DEATH_NOTIFICATION" :
3844 "BC_CLEAR_DEATH_NOTIFICATION",
3845 target);
3846 binder_proc_unlock(proc);
3847 kfree(death);
3848 break;
3849 }
3850
3851 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3852 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3853 proc->pid, thread->pid,
3854 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3855 "BC_REQUEST_DEATH_NOTIFICATION" :
3856 "BC_CLEAR_DEATH_NOTIFICATION",
3857 (u64)cookie, ref->data.debug_id,
3858 ref->data.desc, ref->data.strong,
3859 ref->data.weak, ref->node->debug_id);
3860
3861 binder_node_lock(ref->node);
3862 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3863 if (ref->death) {
3864 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3865 proc->pid, thread->pid);
3866 binder_node_unlock(ref->node);
3867 binder_proc_unlock(proc);
3868 kfree(death);
3869 break;
3870 }
3871 binder_stats_created(BINDER_STAT_DEATH);
3872 INIT_LIST_HEAD(&death->work.entry);
3873 death->cookie = cookie;
3874 ref->death = death;
3875 if (ref->node->proc == NULL) {
3876 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3877
3878 binder_inner_proc_lock(proc);
3879 binder_enqueue_work_ilocked(
3880 &ref->death->work, &proc->todo);
3881 binder_wakeup_proc_ilocked(proc);
3882 binder_inner_proc_unlock(proc);
3883 }
3884 } else {
3885 if (ref->death == NULL) {
3886 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3887 proc->pid, thread->pid);
3888 binder_node_unlock(ref->node);
3889 binder_proc_unlock(proc);
3890 break;
3891 }
3892 death = ref->death;
3893 if (death->cookie != cookie) {
3894 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3895 proc->pid, thread->pid,
3896 (u64)death->cookie,
3897 (u64)cookie);
3898 binder_node_unlock(ref->node);
3899 binder_proc_unlock(proc);
3900 break;
3901 }
3902 ref->death = NULL;
3903 binder_inner_proc_lock(proc);
3904 if (list_empty(&death->work.entry)) {
3905 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3906 if (thread->looper &
3907 (BINDER_LOOPER_STATE_REGISTERED |
3908 BINDER_LOOPER_STATE_ENTERED))
3909 binder_enqueue_thread_work_ilocked(
3910 thread,
3911 &death->work);
3912 else {
3913 binder_enqueue_work_ilocked(
3914 &death->work,
3915 &proc->todo);
3916 binder_wakeup_proc_ilocked(
3917 proc);
3918 }
3919 } else {
3920 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3921 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3922 }
3923 binder_inner_proc_unlock(proc);
3924 }
3925 binder_node_unlock(ref->node);
3926 binder_proc_unlock(proc);
3927 } break;
3928 case BC_DEAD_BINDER_DONE: {
3929 struct binder_work *w;
3930 binder_uintptr_t cookie;
3931 struct binder_ref_death *death = NULL;
3932
3933 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3934 return -EFAULT;
3935
3936 ptr += sizeof(cookie);
3937 binder_inner_proc_lock(proc);
3938 list_for_each_entry(w, &proc->delivered_death,
3939 entry) {
3940 struct binder_ref_death *tmp_death =
3941 container_of(w,
3942 struct binder_ref_death,
3943 work);
3944
3945 if (tmp_death->cookie == cookie) {
3946 death = tmp_death;
3947 break;
3948 }
3949 }
3950 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3951 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3952 proc->pid, thread->pid, (u64)cookie,
3953 death);
3954 if (death == NULL) {
3955 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3956 proc->pid, thread->pid, (u64)cookie);
3957 binder_inner_proc_unlock(proc);
3958 break;
3959 }
3960 binder_dequeue_work_ilocked(&death->work);
3961 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3962 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3963 if (thread->looper &
3964 (BINDER_LOOPER_STATE_REGISTERED |
3965 BINDER_LOOPER_STATE_ENTERED))
3966 binder_enqueue_thread_work_ilocked(
3967 thread, &death->work);
3968 else {
3969 binder_enqueue_work_ilocked(
3970 &death->work,
3971 &proc->todo);
3972 binder_wakeup_proc_ilocked(proc);
3973 }
3974 }
3975 binder_inner_proc_unlock(proc);
3976 } break;
3977
3978 default:
3979 pr_err("%d:%d unknown command %d\n",
3980 proc->pid, thread->pid, cmd);
3981 return -EINVAL;
3982 }
3983 *consumed = ptr - buffer;
3984 }
3985 return 0;
3986}
3987
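/* Account a BR_* return code in the global, per-proc and per-thread stats */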
3988static void binder_stat_br(struct binder_proc *proc,
3989 struct binder_thread *thread, uint32_t cmd)
3990{
3991 trace_binder_return(cmd);
3992 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3993 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3994 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3995 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3996 }
3997}
3998
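/*
 * Write a node ref command (cmd word plus node ptr/cookie pair) into the
 * user-space read buffer, advancing *ptrp past it on success.
 */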
3999static int binder_put_node_cmd(struct binder_proc *proc,
4000 struct binder_thread *thread,
4001 void __user **ptrp,
4002 binder_uintptr_t node_ptr,
4003 binder_uintptr_t node_cookie,
4004 int node_debug_id,
4005 uint32_t cmd, const char *cmd_name)
4006{
4007 void __user *ptr = *ptrp;
4008
4009 if (put_user(cmd, (uint32_t __user *)ptr))
4010 return -EFAULT;
4011 ptr += sizeof(uint32_t);
4012
4013 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4014 return -EFAULT;
4015 ptr += sizeof(binder_uintptr_t);
4016
4017 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4018 return -EFAULT;
4019 ptr += sizeof(binder_uintptr_t);
4020
4021 binder_stat_br(proc, thread, cmd);
4022 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4023 proc->pid, thread->pid, cmd_name, node_debug_id,
4024 (u64)node_ptr, (u64)node_cookie);
4025
4026 *ptrp = ptr;
4027 return 0;
4028}
4029
4030static int binder_wait_for_work(struct binder_thread *thread,
4031 bool do_proc_work)
4032{
4033 DEFINE_WAIT(wait);
4034 struct binder_proc *proc = thread->proc;
4035 int ret = 0;
4036
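	/*
	 * Block until this thread has work or a signal arrives; the
	 * freezer_do_not_count() pair keeps the wait from blocking a
	 * freeze.
	 */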
4037 freezer_do_not_count();
4038 binder_inner_proc_lock(proc);
4039 for (;;) {
4040 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4041 if (binder_has_work_ilocked(thread, do_proc_work))
4042 break;
4043 if (do_proc_work)
4044 list_add(&thread->waiting_thread_node,
4045 &proc->waiting_threads);
4046 binder_inner_proc_unlock(proc);
4047 schedule();
4048 binder_inner_proc_lock(proc);
4049 list_del_init(&thread->waiting_thread_node);
4050 if (signal_pending(current)) {
4051 ret = -EINTR;
4052 break;
4053 }
4054 }
4055 finish_wait(&thread->wait, &wait);
4056 binder_inner_proc_unlock(proc);
4057 freezer_count();
4058
4059 return ret;
4060}
4061
4062
/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
4076static int binder_apply_fd_fixups(struct binder_proc *proc,
4077 struct binder_transaction *t)
4078{
4079 struct binder_txn_fd_fixup *fixup, *tmp;
4080 int ret = 0;
4081
4082 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4083 int fd = get_unused_fd_flags(O_CLOEXEC);
4084
4085 if (fd < 0) {
4086 binder_debug(BINDER_DEBUG_TRANSACTION,
4087 "failed fd fixup txn %d fd %d\n",
4088 t->debug_id, fd);
4089 ret = -ENOMEM;
4090 break;
4091 }
4092 binder_debug(BINDER_DEBUG_TRANSACTION,
4093 "fd fixup txn %d fd %d\n",
4094 t->debug_id, fd);
4095 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4096 fd_install(fd, fixup->file);
4097 fixup->file = NULL;
4098 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4099 fixup->offset, &fd,
4100 sizeof(u32))) {
4101 ret = -EINVAL;
4102 break;
4103 }
4104 }
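	/*
	 * Second pass: release any files that were never installed (after
	 * an error) and, if we failed, schedule the fds that were already
	 * installed in the target to be closed.
	 */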
4105 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4106 if (fixup->file) {
4107 fput(fixup->file);
4108 } else if (ret) {
4109 u32 fd;
4110 int err;
4111
4112 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4113 t->buffer,
4114 fixup->offset,
4115 sizeof(fd));
4116 WARN_ON(err);
4117 if (!err)
4118 binder_deferred_fd_close(fd);
4119 }
4120 list_del(&fixup->fixup_entry);
4121 kfree(fixup);
4122 }
4123
4124 return ret;
4125}
4126
4127static int binder_thread_read(struct binder_proc *proc,
4128 struct binder_thread *thread,
4129 binder_uintptr_t binder_buffer, size_t size,
4130 binder_size_t *consumed, int non_block)
4131{
4132 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4133 void __user *ptr = buffer + *consumed;
4134 void __user *end = buffer + size;
4135
4136 int ret = 0;
4137 int wait_for_proc_work;
4138
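	/* A read starting at offset 0 always begins with a BR_NOOP */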
4139 if (*consumed == 0) {
4140 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4141 return -EFAULT;
4142 ptr += sizeof(uint32_t);
4143 }
4144
4145retry:
4146 binder_inner_proc_lock(proc);
4147 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4148 binder_inner_proc_unlock(proc);
4149
4150 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4151
4152 trace_binder_wait_for_work(wait_for_proc_work,
4153 !!thread->transaction_stack,
4154 !binder_worklist_empty(proc, &thread->todo));
4155 if (wait_for_proc_work) {
4156 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4157 BINDER_LOOPER_STATE_ENTERED))) {
4158 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4159 proc->pid, thread->pid, thread->looper);
4160 wait_event_interruptible(binder_user_error_wait,
4161 binder_stop_on_user_error < 2);
4162 }
4163 binder_set_nice(proc->default_priority);
4164 }
4165
4166 if (non_block) {
4167 if (!binder_has_work(thread, wait_for_proc_work))
4168 ret = -EAGAIN;
4169 } else {
4170 ret = binder_wait_for_work(thread, wait_for_proc_work);
4171 }
4172
4173 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4174
4175 if (ret)
4176 return ret;
4177
4178 while (1) {
4179 uint32_t cmd;
4180 struct binder_transaction_data_secctx tr;
4181 struct binder_transaction_data *trd = &tr.transaction_data;
4182 struct binder_work *w = NULL;
4183 struct list_head *list = NULL;
4184 struct binder_transaction *t = NULL;
4185 struct binder_thread *t_from;
4186 size_t trsize = sizeof(*trd);
4187
4188 binder_inner_proc_lock(proc);
4189 if (!binder_worklist_empty_ilocked(&thread->todo))
4190 list = &thread->todo;
4191 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4192 wait_for_proc_work)
4193 list = &proc->todo;
4194 else {
4195 binder_inner_proc_unlock(proc);
4196
			/* no data added */
4198 if (ptr - buffer == 4 && !thread->looper_need_return)
4199 goto retry;
4200 break;
4201 }
4202
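		/* Need room for a command word plus the largest payload */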
4203 if (end - ptr < sizeof(tr) + 4) {
4204 binder_inner_proc_unlock(proc);
4205 break;
4206 }
4207 w = binder_dequeue_work_head_ilocked(list);
4208 if (binder_worklist_empty_ilocked(&thread->todo))
4209 thread->process_todo = false;
4210
4211 switch (w->type) {
4212 case BINDER_WORK_TRANSACTION: {
4213 binder_inner_proc_unlock(proc);
4214 t = container_of(w, struct binder_transaction, work);
4215 } break;
4216 case BINDER_WORK_RETURN_ERROR: {
4217 struct binder_error *e = container_of(
4218 w, struct binder_error, work);
4219
4220 WARN_ON(e->cmd == BR_OK);
4221 binder_inner_proc_unlock(proc);
4222 if (put_user(e->cmd, (uint32_t __user *)ptr))
4223 return -EFAULT;
4224 cmd = e->cmd;
4225 e->cmd = BR_OK;
4226 ptr += sizeof(uint32_t);
4227
4228 binder_stat_br(proc, thread, cmd);
4229 } break;
4230 case BINDER_WORK_TRANSACTION_COMPLETE:
4231 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4232 if (proc->oneway_spam_detection_enabled &&
4233 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4234 cmd = BR_ONEWAY_SPAM_SUSPECT;
4235 else
4236 cmd = BR_TRANSACTION_COMPLETE;
4237 binder_inner_proc_unlock(proc);
4238 kfree(w);
4239 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4240 if (put_user(cmd, (uint32_t __user *)ptr))
4241 return -EFAULT;
4242 ptr += sizeof(uint32_t);
4243
4244 binder_stat_br(proc, thread, cmd);
4245 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4246 "%d:%d BR_TRANSACTION_COMPLETE\n",
4247 proc->pid, thread->pid);
4248 } break;
4249 case BINDER_WORK_NODE: {
4250 struct binder_node *node = container_of(w, struct binder_node, work);
4251 int strong, weak;
4252 binder_uintptr_t node_ptr = node->ptr;
4253 binder_uintptr_t node_cookie = node->cookie;
4254 int node_debug_id = node->debug_id;
4255 int has_weak_ref;
4256 int has_strong_ref;
4257 void __user *orig_ptr = ptr;
4258
4259 BUG_ON(proc != node->proc);
4260 strong = node->internal_strong_refs ||
4261 node->local_strong_refs;
4262 weak = !hlist_empty(&node->refs) ||
4263 node->local_weak_refs ||
4264 node->tmp_refs || strong;
4265 has_strong_ref = node->has_strong_ref;
4266 has_weak_ref = node->has_weak_ref;
4267
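			/* Decide which BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS to send */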
4268 if (weak && !has_weak_ref) {
4269 node->has_weak_ref = 1;
4270 node->pending_weak_ref = 1;
4271 node->local_weak_refs++;
4272 }
4273 if (strong && !has_strong_ref) {
4274 node->has_strong_ref = 1;
4275 node->pending_strong_ref = 1;
4276 node->local_strong_refs++;
4277 }
4278 if (!strong && has_strong_ref)
4279 node->has_strong_ref = 0;
4280 if (!weak && has_weak_ref)
4281 node->has_weak_ref = 0;
4282 if (!weak && !strong) {
4283 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4284 "%d:%d node %d u%016llx c%016llx deleted\n",
4285 proc->pid, thread->pid,
4286 node_debug_id,
4287 (u64)node_ptr,
4288 (u64)node_cookie);
4289 rb_erase(&node->rb_node, &proc->nodes);
4290 binder_inner_proc_unlock(proc);
4291 binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
4301 binder_node_unlock(node);
4302 binder_free_node(node);
4303 } else
4304 binder_inner_proc_unlock(proc);
4305
4306 if (weak && !has_weak_ref)
4307 ret = binder_put_node_cmd(
4308 proc, thread, &ptr, node_ptr,
4309 node_cookie, node_debug_id,
4310 BR_INCREFS, "BR_INCREFS");
4311 if (!ret && strong && !has_strong_ref)
4312 ret = binder_put_node_cmd(
4313 proc, thread, &ptr, node_ptr,
4314 node_cookie, node_debug_id,
4315 BR_ACQUIRE, "BR_ACQUIRE");
4316 if (!ret && !strong && has_strong_ref)
4317 ret = binder_put_node_cmd(
4318 proc, thread, &ptr, node_ptr,
4319 node_cookie, node_debug_id,
4320 BR_RELEASE, "BR_RELEASE");
4321 if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

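/**
 * binder_release_work() - drain a work list that can no longer be delivered
 * @proc: binder_proc that owns @list
 * @list: list of binder_work items to flush
 *
 * Dequeues each entry under the inner proc lock and releases whatever the
 * work item pinned: transactions are cleaned up with BR_DEAD_REPLY, while
 * completions and death notifications are simply freed.
 */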
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

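/**
 * binder_get_thread_ilocked() - look up (or insert) the calling thread
 * @proc:       binder_proc whose thread rbtree is searched
 * @new_thread: pre-allocated thread to insert on a miss, or NULL to only look up
 *
 * Must be called with the inner proc lock held. The tree is keyed by PID,
 * so the same task always maps back to the same binder_thread.
 */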
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

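/*
 * Look up the binder_thread for the current task, allocating and inserting
 * one on first use. The allocation happens outside the inner lock, so a
 * racing insert is detected and the spare allocation is freed.
 */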
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

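/*
 * Final teardown of a binder_proc: runs once the last temporary reference
 * is dropped, releasing the device, allocator, task and cred references.
 */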
static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

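/**
 * binder_thread_release() - detach a thread from its proc and unwind its stack
 * @proc:   owning binder_proc
 * @thread: thread being released
 *
 * Marks the thread dead, severs every transaction on its stack, sends a
 * failed reply if one was owed, and flushes the thread's todo list.
 *
 * Return: the number of transactions that were still active.
 */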
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * remove this thread from proc->threads
	 * in binder_thread_dec_tmpref().
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other
	 * reasons (e.g. ep_remove_wait_queue() being called due to an epoll
	 * file descriptor being closed). Such other users hold an RCU read
	 * lock, so we can be sure they're done after we call
	 * synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

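/*
 * poll() support: a thread is readable when it (or, for a thread that is
 * available for proc work, its proc) has work queued.
 * BINDER_LOOPER_STATE_POLL records that the thread's waitqueue may be
 * reachable through poll data structures.
 */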
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

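/*
 * Handler for BINDER_WRITE_READ: copies in a struct binder_write_read,
 * consumes the write buffer first, then fills the read buffer, and copies
 * the updated struct back even on failure so that userspace knows how much
 * was consumed.
 */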
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

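/*
 * Handler for BINDER_SET_CONTEXT_MGR{,_EXT}: installs the calling process
 * as context manager, subject to a security hook check and a uid check
 * when a manager uid was previously established. Fails with -EBUSY if a
 * manager node already exists.
 */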
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

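/*
 * Handler for BINDER_GET_NODE_INFO_FOR_REF: reports strong/weak counts of
 * the node behind a handle. Restricted to the context manager.
 */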
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

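/*
 * Handler for BINDER_GET_NODE_DEBUG_INFO: returns debug state for the
 * first node with a user pointer greater than info->ptr, letting
 * userspace iterate the node tree one call at a time.
 */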
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

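/*
 * Check for transactions that are still in flight for this proc, either
 * counted in outstanding_txns or parked on a thread's transaction stack.
 * Caller must hold the inner proc lock.
 */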
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

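/*
 * Handler for BINDER_FREEZE (one target proc): clears the frozen state on
 * disable; on enable, marks the proc frozen and optionally waits up to
 * info->timeout_ms for outstanding transactions to drain, backing the
 * frozen state out again on failure.
 */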
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target: mark the process frozen first so that new
	 * transactions are rejected while we wait for any outstanding
	 * transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

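/*
 * Handler for BINDER_GET_FROZEN_INFO: aggregates the sync/async "received
 * while frozen" flags across all procs with the given PID; bit 1 of
 * sync_recv additionally reports still-pending transactions.
 */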
static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

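/*
 * Main ioctl dispatcher. Per-thread state is looked up (and created on
 * first contact); thread->looper_need_return is cleared on the way out so
 * a looper interrupted by the ioctl returns to userspace promptly.
 */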
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/* run the binder allocator self-test, if it is built in */
	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

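/*
 * mmap() handler: maps the buffer space used to deliver transaction data.
 * Only the process that opened the node may map it, writable mappings are
 * refused, and the real work happens in the allocator.
 */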
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

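/*
 * open() handler: allocates and registers a binder_proc for the opening
 * process (keyed by group leader), wires it to the binder_device behind
 * the inode, and creates the per-PID debugfs/binderfs log entries.
 */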
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID; the printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process-specific log file is
		 * shared between contexts. Only create it for the first PID;
		 * the file will contain information on all contexts of a
		 * given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

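/**
 * binder_node_release() - release a node owned by a dying process
 * @node: node being released; the caller must hold a temporary ref
 * @refs: running count of incoming references, updated and returned
 *
 * Frees the node outright when nothing references it; otherwise moves it
 * to the global dead-nodes list and queues BR_DEAD_BINDER work for every
 * ref that registered a death notification.
 */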
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

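/*
 * Deferred half of binder_release(): tears down all threads, nodes and
 * refs of a closing proc, then drops the temporary reference that keeps
 * the proc alive, which normally triggers binder_free_proc().
 */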
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

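/*
 * Register one binder misc device for @name. The device embeds its own
 * binder_context and starts with a single reference, dropped when the
 * last proc using it goes away.
 */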
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

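/*
 * Module init: sets up the shrinker, the debugfs tree, the devices named
 * in the "devices" module parameter (unless binderfs takes over), and
 * binderfs itself.
 */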
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");