// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node
 *                        release it is also protected by @lock)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;
	bool looper_need_return;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Requires the caller
 * to hold the proc->inner_lock.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

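/*
 * Wake every thread that has called poll() and is currently free to
 * handle process work. Used when no single waiting thread could be
 * selected. Requires proc->inner_lock to be held.
 */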
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as intended, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

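/*
 * Apply the requested nice value to the current task, clamping it to
 * the ceiling allowed by RLIMIT_NICE when the caller is not permitted
 * to set it directly.
 */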
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
					struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * Queue the node work so userspace is
			 * notified of the new weak ref
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

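/*
 * Look up the ref matching the userspace handle @desc in
 * @proc->refs_by_desc. Returns NULL if no ref matches, or if
 * @need_strong_ref is true and the ref holds no strong references.
 * Requires proc->outer_lock to be held.
 */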
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

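	/*
	 * Descriptor 0 is reserved for the context manager; every other
	 * ref gets the lowest unused descriptor, found by walking the
	 * desc-sorted tree until a gap appears.
	 */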
	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

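/*
 * Pop @t off @target_thread's transaction stack and sever its "from"
 * link. Requires target_thread->proc->inner_lock to be held.
 */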
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		} else {
			__release(&target_thread->proc->inner_lock);
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid -- this function assumes that @num_valid
 *		offsets are valid in the offset array.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			binder_buffer containing the object
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the file descriptor to be closed
 * after returning from binder_ioctl().
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	__close_fd_get_file(fd, &twcb->file);
	if (twcb->file)
		task_work_add(current, &twcb->twork, true);
	else
		kfree(twcb);
}

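/*
 * Release the references held by the objects embedded in a transaction
 * buffer. For failed transactions, @failed_at and @is_failure limit the
 * walk to the offsets that were already processed.
 */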
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
			WARN_ON(failed_at &&
				proc->tsk == current->group_leader);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for a pointer here! */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err)
					binder_deferred_fd_close(fd);
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}

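/*
 * Translate a flat_binder_object carrying a local binder into a handle
 * valid in the target process: look up (or create) the node in the
 * sender and take a ref on it from the target.
 */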
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

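/*
 * Translate a handle in the sender's namespace: if the underlying node
 * lives in the target process it is converted back into a local binder
 * object, otherwise a ref is created in the target's namespace.
 */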
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

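/*
 * Validate an fd being passed in the transaction and queue a fixup so
 * that the new fd is installed later in the context of the target
 * process (see struct binder_txn_fd_fixup).
 */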
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

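/*
 * Translate every fd in a binder_fd_array_object whose storage lives
 * in the buffer object @parent, bounds-checking the array first.
 */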
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * The source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the out-going buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret < 0)
			return ret;
	}
	return 0;
}

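/*
 * binder_fixup_parent() - for a BINDER_TYPE_PTR object that declares a
 * parent, patch the parent's copy of the child pointer so it refers to
 * the child's location in the target buffer. The parent and the fixup
 * offset are validated first, and fixups must be applied in order
 * (binder_validate_fixup) so they cannot overlap or rewind.
 */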
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
					&bp->buffer, sizeof(bp->buffer))) {
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the thread and that specific thread is woken up.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if node->proc is NULL,
 * indicating that the target proc has died; in that case @error is set to
 * BR_DEAD_REPLY. Also sets @procp if valid.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

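/*
 * binder_transaction() - core of the transaction path for both
 * BC_TRANSACTION(_SG) and BC_REPLY(_SG).
 *
 * In rough order:
 *  1. Resolve the target: for a reply, pop the caller's transaction
 *     stack; otherwise look up the handle (or the context manager).
 *  2. Allocate the struct binder_transaction and a TRANSACTION_COMPLETE
 *     work item, then allocate the data buffer in the target's space.
 *  3. Copy the data and offsets arrays in from user space and walk the
 *     offsets, translating each object (nodes, handles, fds, fd arrays,
 *     scatter-gather buffers) into the target's view.
 *  4. Queue the work to the target and wake it, unwinding everything
 *     via the err_* labels if any step fails.
 */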
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) *
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer,
					sg_buf_offset,
					(const void __user *)
						(uintptr_t)bp->buffer,
					bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) *
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @buffer:	buffer to be freed
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup the buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, buffer, 0, false);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

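/*
 * binder_thread_write() - consume the write side of a BINDER_WRITE_READ
 * ioctl: a stream of BC_* commands, each a u32 command word followed by
 * its payload. *consumed is advanced as commands are processed so a
 * partially handled buffer can be resumed.
 *
 * Illustrative user-space sketch (not a complete client; error handling
 * omitted, "fd" assumed to come from opening a binder device node):
 *
 *	uint32_t cmds[] = { BC_ENTER_LOOPER };
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = sizeof(cmds),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */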
static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

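/*
 * binder_put_node_cmd() - write a single node ref command (BR_INCREFS,
 * BR_ACQUIRE, BR_RELEASE or BR_DECREFS) plus the node's ptr/cookie pair
 * to the user read buffer, advancing *ptrp on success.
 */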
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

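/*
 * binder_wait_for_work() - sleep until this thread has work (or, when
 * do_proc_work is set, until the process has work), parking the thread
 * on proc->waiting_threads so binder_select_thread_ilocked() can hand
 * it a transaction directly. Returns -ERESTARTSYS if interrupted by a
 * signal.
 */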
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and closing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}

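/*
 * binder_thread_read() - fill the read side of BINDER_WRITE_READ with
 * BR_* commands. Blocks (unless non_block) until there is work on the
 * thread or proc todo list, then encodes each work item: transactions
 * become BR_TRANSACTION/BR_REPLY (or BR_TRANSACTION_SEC_CTX when a
 * security context is attached), node ref updates become the
 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS commands, and death
 * notifications become BR_DEAD_BINDER or
 * BR_CLEAR_DEATH_NOTIFICATION_DONE. May finish with a BR_SPAWN_LOOPER
 * request if the proc is running low on looper threads.
 */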
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done;
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /* spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

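/*
 * Drain a work list whose owner is going away: undelivered transactions
 * are completed with BR_DEAD_REPLY via binder_cleanup_transaction(),
 * everything else is logged and freed.
 */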
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

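/*
 * Look up the calling thread's binder_thread, creating one on first use.
 * The allocation is done outside the inner lock and the lookup retried,
 * so a racing creator wins and the loser's allocation is freed.
 */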
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

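/*
 * Tear down a binder_thread: remove it from proc->threads, walk its
 * transaction stack severing the to/from links while counting the
 * transactions that are still active, wake any pollers, and send a
 * failed reply if an incoming transaction was waiting on this thread.
 * Returns the number of transactions that were still active.
 */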
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

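/*
 * poll() support: EPOLLIN is reported when thread or process work is
 * pending. The thread is marked BINDER_LOOPER_STATE_POLL so that
 * binder_thread_release() knows it must use POLLFREE and
 * synchronize_rcu() when tearing down the waitqueue.
 */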
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

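/*
 * Handler for BINDER_WRITE_READ, the main data-path ioctl: the write
 * buffer (BC_* commands) is always consumed before the read buffer
 * (BR_* returns) is filled, and a failed write aborts the read phase.
 *
 * Minimal user-space sketch (illustrative only; real callers come from
 * libbinder and carry BC_TRANSACTION payloads in the write buffer):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer	= (binder_uintptr_t)out_buf,
 *		.write_size	= out_len,
 *		.read_buffer	= (binder_uintptr_t)in_buf,
 *		.read_size	= sizeof(in_buf),
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *	// bwr.write_consumed/bwr.read_consumed report progress
 */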
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

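/*
 * Register the calling process as the context manager (handle 0) for
 * this binder context. Only one context manager may exist at a time,
 * the LSM must allow it, and once a manager uid has been set, only
 * that uid may claim the role again.
 */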
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

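/*
 * Top-level ioctl dispatcher. Apart from BINDER_WRITE_READ, the
 * commands are small control operations; e.g. a client typically
 * starts with a version handshake (sketch, error handling omitted):
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// refuse a mismatched driver
 */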
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

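/*
 * Set up the buffer mapping. The area is capped at 4MB, must not be
 * writable (see FORBIDDEN_MMAP_FLAGS), and may only be mapped by the
 * process that opened the device node; the actual page management is
 * delegated to binder_alloc.
 */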
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

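/*
 * Open a binder device (or a binderfs node): allocate the per-process
 * binder_proc, bind it to the device's context, register it on the
 * global binder_procs list and expose its state via debugfs.
 */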
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);

	if (is_binderfs_device(nodp))
		binder_dev = nodp->i_private;
	else
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

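/*
 * Final process teardown, run from the deferred workqueue once the fd
 * is released: drop the context-manager node if this proc owned it,
 * release every thread, node and ref, and flush any remaining work.
 * The proc itself is freed when the temporary reference taken here is
 * finally dropped.
 */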
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

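/*
 * Dump the transaction log ring buffer in order: before the log wraps,
 * entries 0..cur are printed; once it has wrapped, (cur + 1) modulo
 * ARRAY_SIZE(log->entry) is the oldest slot, so printing starts there
 * and covers all ARRAY_SIZE(log->entry) entries.
 */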
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

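/*
 * Create one binder device: a dynamic-minor misc character device with
 * its own binder_context, linked onto binder_devices so it can be
 * unwound if a later device fails to register.
 */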
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &transaction_log_fops);
	}

	if (strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");