1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54#include <asm/cacheflush.h>
55#include <linux/fdtable.h>
56#include <linux/file.h>
57#include <linux/freezer.h>
58#include <linux/fs.h>
59#include <linux/list.h>
60#include <linux/miscdevice.h>
61#include <linux/module.h>
62#include <linux/mutex.h>
63#include <linux/nsproxy.h>
64#include <linux/poll.h>
65#include <linux/debugfs.h>
66#include <linux/rbtree.h>
67#include <linux/sched/signal.h>
68#include <linux/sched/mm.h>
69#include <linux/seq_file.h>
70#include <linux/uaccess.h>
71#include <linux/pid_namespace.h>
72#include <linux/security.h>
73#include <linux/spinlock.h>
74
75#include <uapi/linux/android/binder.h>
76#include "binder_alloc.h"
77#include "binder_trace.h"
78
79static HLIST_HEAD(binder_deferred_list);
80static DEFINE_MUTEX(binder_deferred_lock);
81
82static HLIST_HEAD(binder_devices);
83static HLIST_HEAD(binder_procs);
84static DEFINE_MUTEX(binder_procs_lock);
85
86static HLIST_HEAD(binder_dead_nodes);
87static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88
89static struct dentry *binder_debugfs_dir_entry_root;
90static struct dentry *binder_debugfs_dir_entry_proc;
91static atomic_t binder_last_id;
92
93#define BINDER_DEBUG_ENTRY(name) \
94static int binder_##name##_open(struct inode *inode, struct file *file) \
95{ \
96 return single_open(file, binder_##name##_show, inode->i_private); \
97} \
98\
99static const struct file_operations binder_##name##_fops = { \
100 .owner = THIS_MODULE, \
101 .open = binder_##name##_open, \
102 .read = seq_read, \
103 .llseek = seq_lseek, \
104 .release = single_release, \
105}
106
107static int binder_proc_show(struct seq_file *m, void *unused);
108BINDER_DEBUG_ENTRY(proc);
109
110
111#ifndef SZ_1K
112#define SZ_1K 0x400
113#endif
114
115#ifndef SZ_4M
116#define SZ_4M 0x400000
117#endif
118
119#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
120
121enum {
122 BINDER_DEBUG_USER_ERROR = 1U << 0,
123 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
124 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
125 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
126 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
127 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
128 BINDER_DEBUG_READ_WRITE = 1U << 6,
129 BINDER_DEBUG_USER_REFS = 1U << 7,
130 BINDER_DEBUG_THREADS = 1U << 8,
131 BINDER_DEBUG_TRANSACTION = 1U << 9,
132 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
133 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
134 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
135 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
136 BINDER_DEBUG_SPINLOCKS = 1U << 14,
137};
138static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
139 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
140module_param_named(debug_mask, binder_debug_mask, uint, 0644);
141
142static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
143module_param_named(devices, binder_devices_param, charp, 0444);
144
145static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
146static int binder_stop_on_user_error;
147
148static int binder_set_stop_on_user_error(const char *val,
149 const struct kernel_param *kp)
150{
151 int ret;
152
153 ret = param_set_int(val, kp);
154 if (binder_stop_on_user_error < 2)
155 wake_up(&binder_user_error_wait);
156 return ret;
157}
158module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
159 param_get_int, &binder_stop_on_user_error, 0644);
160
161#define binder_debug(mask, x...) \
162 do { \
163 if (binder_debug_mask & mask) \
164 pr_info(x); \
165 } while (0)
166
167#define binder_user_error(x...) \
168 do { \
169 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
170 pr_info(x); \
171 if (binder_stop_on_user_error) \
172 binder_stop_on_user_error = 2; \
173 } while (0)
174
175#define to_flat_binder_object(hdr) \
176 container_of(hdr, struct flat_binder_object, hdr)
177
178#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
179
180#define to_binder_buffer_object(hdr) \
181 container_of(hdr, struct binder_buffer_object, hdr)
182
183#define to_binder_fd_array_object(hdr) \
184 container_of(hdr, struct binder_fd_array_object, hdr)
185
186enum binder_stat_types {
187 BINDER_STAT_PROC,
188 BINDER_STAT_THREAD,
189 BINDER_STAT_NODE,
190 BINDER_STAT_REF,
191 BINDER_STAT_DEATH,
192 BINDER_STAT_TRANSACTION,
193 BINDER_STAT_TRANSACTION_COMPLETE,
194 BINDER_STAT_COUNT
195};
196
197struct binder_stats {
198 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
199 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
200 atomic_t obj_created[BINDER_STAT_COUNT];
201 atomic_t obj_deleted[BINDER_STAT_COUNT];
202};
203
204static struct binder_stats binder_stats;
205
206static inline void binder_stats_deleted(enum binder_stat_types type)
207{
208 atomic_inc(&binder_stats.obj_deleted[type]);
209}
210
211static inline void binder_stats_created(enum binder_stat_types type)
212{
213 atomic_inc(&binder_stats.obj_created[type]);
214}
215
216struct binder_transaction_log_entry {
217 int debug_id;
218 int debug_id_done;
219 int call_type;
220 int from_proc;
221 int from_thread;
222 int target_handle;
223 int to_proc;
224 int to_thread;
225 int to_node;
226 int data_size;
227 int offsets_size;
228 int return_error_line;
229 uint32_t return_error;
230 uint32_t return_error_param;
231 const char *context_name;
232};
233struct binder_transaction_log {
234 atomic_t cur;
235 bool full;
236 struct binder_transaction_log_entry entry[32];
237};
238static struct binder_transaction_log binder_transaction_log;
239static struct binder_transaction_log binder_transaction_log_failed;
240
241static struct binder_transaction_log_entry *binder_transaction_log_add(
242 struct binder_transaction_log *log)
243{
244 struct binder_transaction_log_entry *e;
245 unsigned int cur = atomic_inc_return(&log->cur);
246
247 if (cur >= ARRAY_SIZE(log->entry))
248 log->full = true;
249 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
250 WRITE_ONCE(e->debug_id_done, 0);
251
252
253
254
255
256 smp_wmb();
257 memset(e, 0, sizeof(*e));
258 return e;
259}
260
261struct binder_context {
262 struct binder_node *binder_context_mgr_node;
263 struct mutex context_mgr_node_lock;
264
265 kuid_t binder_context_mgr_uid;
266 const char *name;
267};
268
269struct binder_device {
270 struct hlist_node hlist;
271 struct miscdevice miscdev;
272 struct binder_context context;
273};
274
275
276
277
278
279
280
281
282struct binder_work {
283 struct list_head entry;
284
285 enum {
286 BINDER_WORK_TRANSACTION = 1,
287 BINDER_WORK_TRANSACTION_COMPLETE,
288 BINDER_WORK_RETURN_ERROR,
289 BINDER_WORK_NODE,
290 BINDER_WORK_DEAD_BINDER,
291 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
292 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
293 } type;
294};
295
296struct binder_error {
297 struct binder_work work;
298 uint32_t cmd;
299};
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359struct binder_node {
360 int debug_id;
361 spinlock_t lock;
362 struct binder_work work;
363 union {
364 struct rb_node rb_node;
365 struct hlist_node dead_node;
366 };
367 struct binder_proc *proc;
368 struct hlist_head refs;
369 int internal_strong_refs;
370 int local_weak_refs;
371 int local_strong_refs;
372 int tmp_refs;
373 binder_uintptr_t ptr;
374 binder_uintptr_t cookie;
375 struct {
376
377
378
379
380 u8 has_strong_ref:1;
381 u8 pending_strong_ref:1;
382 u8 has_weak_ref:1;
383 u8 pending_weak_ref:1;
384 };
385 struct {
386
387
388
389 u8 accept_fds:1;
390 u8 min_priority;
391 };
392 bool has_async_transaction;
393 struct list_head async_todo;
394};
395
396struct binder_ref_death {
397
398
399
400
401
402 struct binder_work work;
403 binder_uintptr_t cookie;
404};
405
406
407
408
409
410
411
412
413
414
415
416
417
418struct binder_ref_data {
419 int debug_id;
420 uint32_t desc;
421 int strong;
422 int weak;
423};
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442struct binder_ref {
443
444
445
446
447 struct binder_ref_data data;
448 struct rb_node rb_node_desc;
449 struct rb_node rb_node_node;
450 struct hlist_node node_entry;
451 struct binder_proc *proc;
452 struct binder_node *node;
453 struct binder_ref_death *death;
454};
455
456enum binder_deferred_state {
457 BINDER_DEFERRED_PUT_FILES = 0x01,
458 BINDER_DEFERRED_FLUSH = 0x02,
459 BINDER_DEFERRED_RELEASE = 0x04,
460};
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518struct binder_proc {
519 struct hlist_node proc_node;
520 struct rb_root threads;
521 struct rb_root nodes;
522 struct rb_root refs_by_desc;
523 struct rb_root refs_by_node;
524 struct list_head waiting_threads;
525 int pid;
526 struct task_struct *tsk;
527 struct files_struct *files;
528 struct mutex files_lock;
529 struct hlist_node deferred_work_node;
530 int deferred_work;
531 bool is_dead;
532
533 struct list_head todo;
534 struct binder_stats stats;
535 struct list_head delivered_death;
536 int max_threads;
537 int requested_threads;
538 int requested_threads_started;
539 int tmp_ref;
540 long default_priority;
541 struct dentry *debugfs_entry;
542 struct binder_alloc alloc;
543 struct binder_context *context;
544 spinlock_t inner_lock;
545 spinlock_t outer_lock;
546};
547
548enum {
549 BINDER_LOOPER_STATE_REGISTERED = 0x01,
550 BINDER_LOOPER_STATE_ENTERED = 0x02,
551 BINDER_LOOPER_STATE_EXITED = 0x04,
552 BINDER_LOOPER_STATE_INVALID = 0x08,
553 BINDER_LOOPER_STATE_WAITING = 0x10,
554 BINDER_LOOPER_STATE_POLL = 0x20,
555};
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593struct binder_thread {
594 struct binder_proc *proc;
595 struct rb_node rb_node;
596 struct list_head waiting_thread_node;
597 int pid;
598 int looper;
599 bool looper_need_return;
600 struct binder_transaction *transaction_stack;
601 struct list_head todo;
602 bool process_todo;
603 struct binder_error return_error;
604 struct binder_error reply_error;
605 wait_queue_head_t wait;
606 struct binder_stats stats;
607 atomic_t tmp_ref;
608 bool is_dead;
609};
610
611struct binder_transaction {
612 int debug_id;
613 struct binder_work work;
614 struct binder_thread *from;
615 struct binder_transaction *from_parent;
616 struct binder_proc *to_proc;
617 struct binder_thread *to_thread;
618 struct binder_transaction *to_parent;
619 unsigned need_reply:1;
620
621
622 struct binder_buffer *buffer;
623 unsigned int code;
624 unsigned int flags;
625 long priority;
626 long saved_priority;
627 kuid_t sender_euid;
628
629
630
631
632
633
634 spinlock_t lock;
635};
636
637
638
639
640
641
642
643
644#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
645static void
646_binder_proc_lock(struct binder_proc *proc, int line)
647{
648 binder_debug(BINDER_DEBUG_SPINLOCKS,
649 "%s: line=%d\n", __func__, line);
650 spin_lock(&proc->outer_lock);
651}
652
653
654
655
656
657
658
659#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
660static void
661_binder_proc_unlock(struct binder_proc *proc, int line)
662{
663 binder_debug(BINDER_DEBUG_SPINLOCKS,
664 "%s: line=%d\n", __func__, line);
665 spin_unlock(&proc->outer_lock);
666}
667
668
669
670
671
672
673
674#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
675static void
676_binder_inner_proc_lock(struct binder_proc *proc, int line)
677{
678 binder_debug(BINDER_DEBUG_SPINLOCKS,
679 "%s: line=%d\n", __func__, line);
680 spin_lock(&proc->inner_lock);
681}
682
683
684
685
686
687
688
689#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
690static void
691_binder_inner_proc_unlock(struct binder_proc *proc, int line)
692{
693 binder_debug(BINDER_DEBUG_SPINLOCKS,
694 "%s: line=%d\n", __func__, line);
695 spin_unlock(&proc->inner_lock);
696}
697
698
699
700
701
702
703
704#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
705static void
706_binder_node_lock(struct binder_node *node, int line)
707{
708 binder_debug(BINDER_DEBUG_SPINLOCKS,
709 "%s: line=%d\n", __func__, line);
710 spin_lock(&node->lock);
711}
712
713
714
715
716
717
718
719#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
720static void
721_binder_node_unlock(struct binder_node *node, int line)
722{
723 binder_debug(BINDER_DEBUG_SPINLOCKS,
724 "%s: line=%d\n", __func__, line);
725 spin_unlock(&node->lock);
726}
727
728
729
730
731
732
733
734
735#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
736static void
737_binder_node_inner_lock(struct binder_node *node, int line)
738{
739 binder_debug(BINDER_DEBUG_SPINLOCKS,
740 "%s: line=%d\n", __func__, line);
741 spin_lock(&node->lock);
742 if (node->proc)
743 binder_inner_proc_lock(node->proc);
744}
745
746
747
748
749
750
751
752#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
753static void
754_binder_node_inner_unlock(struct binder_node *node, int line)
755{
756 struct binder_proc *proc = node->proc;
757
758 binder_debug(BINDER_DEBUG_SPINLOCKS,
759 "%s: line=%d\n", __func__, line);
760 if (proc)
761 binder_inner_proc_unlock(proc);
762 spin_unlock(&node->lock);
763}
764
765static bool binder_worklist_empty_ilocked(struct list_head *list)
766{
767 return list_empty(list);
768}
769
770
771
772
773
774
775
776
777static bool binder_worklist_empty(struct binder_proc *proc,
778 struct list_head *list)
779{
780 bool ret;
781
782 binder_inner_proc_lock(proc);
783 ret = binder_worklist_empty_ilocked(list);
784 binder_inner_proc_unlock(proc);
785 return ret;
786}
787
788
789
790
791
792
793
794
795
796
797
798static void
799binder_enqueue_work_ilocked(struct binder_work *work,
800 struct list_head *target_list)
801{
802 BUG_ON(target_list == NULL);
803 BUG_ON(work->entry.next && !list_empty(&work->entry));
804 list_add_tail(&work->entry, target_list);
805}
806
807
808
809
810
811
812
813
814
815
816
817
818static void
819binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
820 struct binder_work *work)
821{
822 binder_enqueue_work_ilocked(work, &thread->todo);
823}
824
825
826
827
828
829
830
831
832
833
834
835static void
836binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
837 struct binder_work *work)
838{
839 binder_enqueue_work_ilocked(work, &thread->todo);
840 thread->process_todo = true;
841}
842
843
844
845
846
847
848
849
850
851static void
852binder_enqueue_thread_work(struct binder_thread *thread,
853 struct binder_work *work)
854{
855 binder_inner_proc_lock(thread->proc);
856 binder_enqueue_thread_work_ilocked(thread, work);
857 binder_inner_proc_unlock(thread->proc);
858}
859
860static void
861binder_dequeue_work_ilocked(struct binder_work *work)
862{
863 list_del_init(&work->entry);
864}
865
866
867
868
869
870
871
872
873
874static void
875binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
876{
877 binder_inner_proc_lock(proc);
878 binder_dequeue_work_ilocked(work);
879 binder_inner_proc_unlock(proc);
880}
881
882static struct binder_work *binder_dequeue_work_head_ilocked(
883 struct list_head *list)
884{
885 struct binder_work *w;
886
887 w = list_first_entry_or_null(list, struct binder_work, entry);
888 if (w)
889 list_del_init(&w->entry);
890 return w;
891}
892
893
894
895
896
897
898
899
900
901
902static struct binder_work *binder_dequeue_work_head(
903 struct binder_proc *proc,
904 struct list_head *list)
905{
906 struct binder_work *w;
907
908 binder_inner_proc_lock(proc);
909 w = binder_dequeue_work_head_ilocked(list);
910 binder_inner_proc_unlock(proc);
911 return w;
912}
913
914static void
915binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
916static void binder_free_thread(struct binder_thread *thread);
917static void binder_free_proc(struct binder_proc *proc);
918static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
919
920static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
921{
922 unsigned long rlim_cur;
923 unsigned long irqs;
924 int ret;
925
926 mutex_lock(&proc->files_lock);
927 if (proc->files == NULL) {
928 ret = -ESRCH;
929 goto err;
930 }
931 if (!lock_task_sighand(proc->tsk, &irqs)) {
932 ret = -EMFILE;
933 goto err;
934 }
935 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
936 unlock_task_sighand(proc->tsk, &irqs);
937
938 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
939err:
940 mutex_unlock(&proc->files_lock);
941 return ret;
942}
943
944
945
946
947static void task_fd_install(
948 struct binder_proc *proc, unsigned int fd, struct file *file)
949{
950 mutex_lock(&proc->files_lock);
951 if (proc->files)
952 __fd_install(proc->files, fd, file);
953 mutex_unlock(&proc->files_lock);
954}
955
956
957
958
959static long task_close_fd(struct binder_proc *proc, unsigned int fd)
960{
961 int retval;
962
963 mutex_lock(&proc->files_lock);
964 if (proc->files == NULL) {
965 retval = -ESRCH;
966 goto err;
967 }
968 retval = __close_fd(proc->files, fd);
969
970 if (unlikely(retval == -ERESTARTSYS ||
971 retval == -ERESTARTNOINTR ||
972 retval == -ERESTARTNOHAND ||
973 retval == -ERESTART_RESTARTBLOCK))
974 retval = -EINTR;
975err:
976 mutex_unlock(&proc->files_lock);
977 return retval;
978}
979
980static bool binder_has_work_ilocked(struct binder_thread *thread,
981 bool do_proc_work)
982{
983 return thread->process_todo ||
984 thread->looper_need_return ||
985 (do_proc_work &&
986 !binder_worklist_empty_ilocked(&thread->proc->todo));
987}
988
989static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
990{
991 bool has_work;
992
993 binder_inner_proc_lock(thread->proc);
994 has_work = binder_has_work_ilocked(thread, do_proc_work);
995 binder_inner_proc_unlock(thread->proc);
996
997 return has_work;
998}
999
1000static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1001{
1002 return !thread->transaction_stack &&
1003 binder_worklist_empty_ilocked(&thread->todo) &&
1004 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1005 BINDER_LOOPER_STATE_REGISTERED));
1006}
1007
1008static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1009 bool sync)
1010{
1011 struct rb_node *n;
1012 struct binder_thread *thread;
1013
1014 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1015 thread = rb_entry(n, struct binder_thread, rb_node);
1016 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1017 binder_available_for_proc_work_ilocked(thread)) {
1018 if (sync)
1019 wake_up_interruptible_sync(&thread->wait);
1020 else
1021 wake_up_interruptible(&thread->wait);
1022 }
1023 }
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038static struct binder_thread *
1039binder_select_thread_ilocked(struct binder_proc *proc)
1040{
1041 struct binder_thread *thread;
1042
1043 assert_spin_locked(&proc->inner_lock);
1044 thread = list_first_entry_or_null(&proc->waiting_threads,
1045 struct binder_thread,
1046 waiting_thread_node);
1047
1048 if (thread)
1049 list_del_init(&thread->waiting_thread_node);
1050
1051 return thread;
1052}
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1071 struct binder_thread *thread,
1072 bool sync)
1073{
1074 assert_spin_locked(&proc->inner_lock);
1075
1076 if (thread) {
1077 if (sync)
1078 wake_up_interruptible_sync(&thread->wait);
1079 else
1080 wake_up_interruptible(&thread->wait);
1081 return;
1082 }
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097 binder_wakeup_poll_threads_ilocked(proc, sync);
1098}
1099
1100static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1101{
1102 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1103
1104 binder_wakeup_thread_ilocked(proc, thread, false);
1105}
1106
1107static void binder_set_nice(long nice)
1108{
1109 long min_nice;
1110
1111 if (can_nice(current, nice)) {
1112 set_user_nice(current, nice);
1113 return;
1114 }
1115 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1116 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1117 "%d: nice value %ld not allowed use %ld instead\n",
1118 current->pid, nice, min_nice);
1119 set_user_nice(current, min_nice);
1120 if (min_nice <= MAX_NICE)
1121 return;
1122 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1123}
1124
1125static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1126 binder_uintptr_t ptr)
1127{
1128 struct rb_node *n = proc->nodes.rb_node;
1129 struct binder_node *node;
1130
1131 assert_spin_locked(&proc->inner_lock);
1132
1133 while (n) {
1134 node = rb_entry(n, struct binder_node, rb_node);
1135
1136 if (ptr < node->ptr)
1137 n = n->rb_left;
1138 else if (ptr > node->ptr)
1139 n = n->rb_right;
1140 else {
1141
1142
1143
1144
1145
1146 binder_inc_node_tmpref_ilocked(node);
1147 return node;
1148 }
1149 }
1150 return NULL;
1151}
1152
1153static struct binder_node *binder_get_node(struct binder_proc *proc,
1154 binder_uintptr_t ptr)
1155{
1156 struct binder_node *node;
1157
1158 binder_inner_proc_lock(proc);
1159 node = binder_get_node_ilocked(proc, ptr);
1160 binder_inner_proc_unlock(proc);
1161 return node;
1162}
1163
1164static struct binder_node *binder_init_node_ilocked(
1165 struct binder_proc *proc,
1166 struct binder_node *new_node,
1167 struct flat_binder_object *fp)
1168{
1169 struct rb_node **p = &proc->nodes.rb_node;
1170 struct rb_node *parent = NULL;
1171 struct binder_node *node;
1172 binder_uintptr_t ptr = fp ? fp->binder : 0;
1173 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1174 __u32 flags = fp ? fp->flags : 0;
1175
1176 assert_spin_locked(&proc->inner_lock);
1177
1178 while (*p) {
1179
1180 parent = *p;
1181 node = rb_entry(parent, struct binder_node, rb_node);
1182
1183 if (ptr < node->ptr)
1184 p = &(*p)->rb_left;
1185 else if (ptr > node->ptr)
1186 p = &(*p)->rb_right;
1187 else {
1188
1189
1190
1191
1192
1193 binder_inc_node_tmpref_ilocked(node);
1194 return node;
1195 }
1196 }
1197 node = new_node;
1198 binder_stats_created(BINDER_STAT_NODE);
1199 node->tmp_refs++;
1200 rb_link_node(&node->rb_node, parent, p);
1201 rb_insert_color(&node->rb_node, &proc->nodes);
1202 node->debug_id = atomic_inc_return(&binder_last_id);
1203 node->proc = proc;
1204 node->ptr = ptr;
1205 node->cookie = cookie;
1206 node->work.type = BINDER_WORK_NODE;
1207 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1208 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1209 spin_lock_init(&node->lock);
1210 INIT_LIST_HEAD(&node->work.entry);
1211 INIT_LIST_HEAD(&node->async_todo);
1212 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1213 "%d:%d node %d u%016llx c%016llx created\n",
1214 proc->pid, current->pid, node->debug_id,
1215 (u64)node->ptr, (u64)node->cookie);
1216
1217 return node;
1218}
1219
1220static struct binder_node *binder_new_node(struct binder_proc *proc,
1221 struct flat_binder_object *fp)
1222{
1223 struct binder_node *node;
1224 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1225
1226 if (!new_node)
1227 return NULL;
1228 binder_inner_proc_lock(proc);
1229 node = binder_init_node_ilocked(proc, new_node, fp);
1230 binder_inner_proc_unlock(proc);
1231 if (node != new_node)
1232
1233
1234
1235 kfree(new_node);
1236
1237 return node;
1238}
1239
1240static void binder_free_node(struct binder_node *node)
1241{
1242 kfree(node);
1243 binder_stats_deleted(BINDER_STAT_NODE);
1244}
1245
1246static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1247 int internal,
1248 struct list_head *target_list)
1249{
1250 struct binder_proc *proc = node->proc;
1251
1252 assert_spin_locked(&node->lock);
1253 if (proc)
1254 assert_spin_locked(&proc->inner_lock);
1255 if (strong) {
1256 if (internal) {
1257 if (target_list == NULL &&
1258 node->internal_strong_refs == 0 &&
1259 !(node->proc &&
1260 node == node->proc->context->binder_context_mgr_node &&
1261 node->has_strong_ref)) {
1262 pr_err("invalid inc strong node for %d\n",
1263 node->debug_id);
1264 return -EINVAL;
1265 }
1266 node->internal_strong_refs++;
1267 } else
1268 node->local_strong_refs++;
1269 if (!node->has_strong_ref && target_list) {
1270 binder_dequeue_work_ilocked(&node->work);
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282 binder_enqueue_work_ilocked(&node->work, target_list);
1283 }
1284 } else {
1285 if (!internal)
1286 node->local_weak_refs++;
1287 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1288 if (target_list == NULL) {
1289 pr_err("invalid inc weak node for %d\n",
1290 node->debug_id);
1291 return -EINVAL;
1292 }
1293
1294
1295
1296 binder_enqueue_work_ilocked(&node->work, target_list);
1297 }
1298 }
1299 return 0;
1300}
1301
1302static int binder_inc_node(struct binder_node *node, int strong, int internal,
1303 struct list_head *target_list)
1304{
1305 int ret;
1306
1307 binder_node_inner_lock(node);
1308 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1309 binder_node_inner_unlock(node);
1310
1311 return ret;
1312}
1313
1314static bool binder_dec_node_nilocked(struct binder_node *node,
1315 int strong, int internal)
1316{
1317 struct binder_proc *proc = node->proc;
1318
1319 assert_spin_locked(&node->lock);
1320 if (proc)
1321 assert_spin_locked(&proc->inner_lock);
1322 if (strong) {
1323 if (internal)
1324 node->internal_strong_refs--;
1325 else
1326 node->local_strong_refs--;
1327 if (node->local_strong_refs || node->internal_strong_refs)
1328 return false;
1329 } else {
1330 if (!internal)
1331 node->local_weak_refs--;
1332 if (node->local_weak_refs || node->tmp_refs ||
1333 !hlist_empty(&node->refs))
1334 return false;
1335 }
1336
1337 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1338 if (list_empty(&node->work.entry)) {
1339 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1340 binder_wakeup_proc_ilocked(proc);
1341 }
1342 } else {
1343 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1344 !node->local_weak_refs && !node->tmp_refs) {
1345 if (proc) {
1346 binder_dequeue_work_ilocked(&node->work);
1347 rb_erase(&node->rb_node, &proc->nodes);
1348 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1349 "refless node %d deleted\n",
1350 node->debug_id);
1351 } else {
1352 BUG_ON(!list_empty(&node->work.entry));
1353 spin_lock(&binder_dead_nodes_lock);
1354
1355
1356
1357
1358 if (node->tmp_refs) {
1359 spin_unlock(&binder_dead_nodes_lock);
1360 return false;
1361 }
1362 hlist_del(&node->dead_node);
1363 spin_unlock(&binder_dead_nodes_lock);
1364 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1365 "dead node %d deleted\n",
1366 node->debug_id);
1367 }
1368 return true;
1369 }
1370 }
1371 return false;
1372}
1373
1374static void binder_dec_node(struct binder_node *node, int strong, int internal)
1375{
1376 bool free_node;
1377
1378 binder_node_inner_lock(node);
1379 free_node = binder_dec_node_nilocked(node, strong, internal);
1380 binder_node_inner_unlock(node);
1381 if (free_node)
1382 binder_free_node(node);
1383}
1384
1385static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1386{
1387
1388
1389
1390
1391
1392 node->tmp_refs++;
1393}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408static void binder_inc_node_tmpref(struct binder_node *node)
1409{
1410 binder_node_lock(node);
1411 if (node->proc)
1412 binder_inner_proc_lock(node->proc);
1413 else
1414 spin_lock(&binder_dead_nodes_lock);
1415 binder_inc_node_tmpref_ilocked(node);
1416 if (node->proc)
1417 binder_inner_proc_unlock(node->proc);
1418 else
1419 spin_unlock(&binder_dead_nodes_lock);
1420 binder_node_unlock(node);
1421}
1422
1423
1424
1425
1426
1427
1428
1429static void binder_dec_node_tmpref(struct binder_node *node)
1430{
1431 bool free_node;
1432
1433 binder_node_inner_lock(node);
1434 if (!node->proc)
1435 spin_lock(&binder_dead_nodes_lock);
1436 node->tmp_refs--;
1437 BUG_ON(node->tmp_refs < 0);
1438 if (!node->proc)
1439 spin_unlock(&binder_dead_nodes_lock);
1440
1441
1442
1443
1444
1445
1446 free_node = binder_dec_node_nilocked(node, 0, 1);
1447 binder_node_inner_unlock(node);
1448 if (free_node)
1449 binder_free_node(node);
1450}
1451
1452static void binder_put_node(struct binder_node *node)
1453{
1454 binder_dec_node_tmpref(node);
1455}
1456
1457static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1458 u32 desc, bool need_strong_ref)
1459{
1460 struct rb_node *n = proc->refs_by_desc.rb_node;
1461 struct binder_ref *ref;
1462
1463 while (n) {
1464 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1465
1466 if (desc < ref->data.desc) {
1467 n = n->rb_left;
1468 } else if (desc > ref->data.desc) {
1469 n = n->rb_right;
1470 } else if (need_strong_ref && !ref->data.strong) {
1471 binder_user_error("tried to use weak ref as strong ref\n");
1472 return NULL;
1473 } else {
1474 return ref;
1475 }
1476 }
1477 return NULL;
1478}
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498static struct binder_ref *binder_get_ref_for_node_olocked(
1499 struct binder_proc *proc,
1500 struct binder_node *node,
1501 struct binder_ref *new_ref)
1502{
1503 struct binder_context *context = proc->context;
1504 struct rb_node **p = &proc->refs_by_node.rb_node;
1505 struct rb_node *parent = NULL;
1506 struct binder_ref *ref;
1507 struct rb_node *n;
1508
1509 while (*p) {
1510 parent = *p;
1511 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1512
1513 if (node < ref->node)
1514 p = &(*p)->rb_left;
1515 else if (node > ref->node)
1516 p = &(*p)->rb_right;
1517 else
1518 return ref;
1519 }
1520 if (!new_ref)
1521 return NULL;
1522
1523 binder_stats_created(BINDER_STAT_REF);
1524 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1525 new_ref->proc = proc;
1526 new_ref->node = node;
1527 rb_link_node(&new_ref->rb_node_node, parent, p);
1528 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1529
1530 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1531 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1532 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1533 if (ref->data.desc > new_ref->data.desc)
1534 break;
1535 new_ref->data.desc = ref->data.desc + 1;
1536 }
1537
1538 p = &proc->refs_by_desc.rb_node;
1539 while (*p) {
1540 parent = *p;
1541 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1542
1543 if (new_ref->data.desc < ref->data.desc)
1544 p = &(*p)->rb_left;
1545 else if (new_ref->data.desc > ref->data.desc)
1546 p = &(*p)->rb_right;
1547 else
1548 BUG();
1549 }
1550 rb_link_node(&new_ref->rb_node_desc, parent, p);
1551 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1552
1553 binder_node_lock(node);
1554 hlist_add_head(&new_ref->node_entry, &node->refs);
1555
1556 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1557 "%d new ref %d desc %d for node %d\n",
1558 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1559 node->debug_id);
1560 binder_node_unlock(node);
1561 return new_ref;
1562}
1563
1564static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1565{
1566 bool delete_node = false;
1567
1568 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1569 "%d delete ref %d desc %d for node %d\n",
1570 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1571 ref->node->debug_id);
1572
1573 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1574 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1575
1576 binder_node_inner_lock(ref->node);
1577 if (ref->data.strong)
1578 binder_dec_node_nilocked(ref->node, 1, 1);
1579
1580 hlist_del(&ref->node_entry);
1581 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1582 binder_node_inner_unlock(ref->node);
1583
1584
1585
1586 if (!delete_node) {
1587
1588
1589
1590
1591
1592 ref->node = NULL;
1593 }
1594
1595 if (ref->death) {
1596 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1597 "%d delete ref %d desc %d has death notification\n",
1598 ref->proc->pid, ref->data.debug_id,
1599 ref->data.desc);
1600 binder_dequeue_work(ref->proc, &ref->death->work);
1601 binder_stats_deleted(BINDER_STAT_DEATH);
1602 }
1603 binder_stats_deleted(BINDER_STAT_REF);
1604}
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1617 struct list_head *target_list)
1618{
1619 int ret;
1620
1621 if (strong) {
1622 if (ref->data.strong == 0) {
1623 ret = binder_inc_node(ref->node, 1, 1, target_list);
1624 if (ret)
1625 return ret;
1626 }
1627 ref->data.strong++;
1628 } else {
1629 if (ref->data.weak == 0) {
1630 ret = binder_inc_node(ref->node, 0, 1, target_list);
1631 if (ret)
1632 return ret;
1633 }
1634 ref->data.weak++;
1635 }
1636 return 0;
1637}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1649{
1650 if (strong) {
1651 if (ref->data.strong == 0) {
1652 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1653 ref->proc->pid, ref->data.debug_id,
1654 ref->data.desc, ref->data.strong,
1655 ref->data.weak);
1656 return false;
1657 }
1658 ref->data.strong--;
1659 if (ref->data.strong == 0)
1660 binder_dec_node(ref->node, strong, 1);
1661 } else {
1662 if (ref->data.weak == 0) {
1663 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1664 ref->proc->pid, ref->data.debug_id,
1665 ref->data.desc, ref->data.strong,
1666 ref->data.weak);
1667 return false;
1668 }
1669 ref->data.weak--;
1670 }
1671 if (ref->data.strong == 0 && ref->data.weak == 0) {
1672 binder_cleanup_ref_olocked(ref);
1673 return true;
1674 }
1675 return false;
1676}
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689static struct binder_node *binder_get_node_from_ref(
1690 struct binder_proc *proc,
1691 u32 desc, bool need_strong_ref,
1692 struct binder_ref_data *rdata)
1693{
1694 struct binder_node *node;
1695 struct binder_ref *ref;
1696
1697 binder_proc_lock(proc);
1698 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1699 if (!ref)
1700 goto err_no_ref;
1701 node = ref->node;
1702
1703
1704
1705
1706 binder_inc_node_tmpref(node);
1707 if (rdata)
1708 *rdata = ref->data;
1709 binder_proc_unlock(proc);
1710
1711 return node;
1712
1713err_no_ref:
1714 binder_proc_unlock(proc);
1715 return NULL;
1716}
1717
1718
1719
1720
1721
1722
1723
1724
1725static void binder_free_ref(struct binder_ref *ref)
1726{
1727 if (ref->node)
1728 binder_free_node(ref->node);
1729 kfree(ref->death);
1730 kfree(ref);
1731}
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746static int binder_update_ref_for_handle(struct binder_proc *proc,
1747 uint32_t desc, bool increment, bool strong,
1748 struct binder_ref_data *rdata)
1749{
1750 int ret = 0;
1751 struct binder_ref *ref;
1752 bool delete_ref = false;
1753
1754 binder_proc_lock(proc);
1755 ref = binder_get_ref_olocked(proc, desc, strong);
1756 if (!ref) {
1757 ret = -EINVAL;
1758 goto err_no_ref;
1759 }
1760 if (increment)
1761 ret = binder_inc_ref_olocked(ref, strong, NULL);
1762 else
1763 delete_ref = binder_dec_ref_olocked(ref, strong);
1764
1765 if (rdata)
1766 *rdata = ref->data;
1767 binder_proc_unlock(proc);
1768
1769 if (delete_ref)
1770 binder_free_ref(ref);
1771 return ret;
1772
1773err_no_ref:
1774 binder_proc_unlock(proc);
1775 return ret;
1776}
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789static int binder_dec_ref_for_handle(struct binder_proc *proc,
1790 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1791{
1792 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1793}
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809static int binder_inc_ref_for_node(struct binder_proc *proc,
1810 struct binder_node *node,
1811 bool strong,
1812 struct list_head *target_list,
1813 struct binder_ref_data *rdata)
1814{
1815 struct binder_ref *ref;
1816 struct binder_ref *new_ref = NULL;
1817 int ret = 0;
1818
1819 binder_proc_lock(proc);
1820 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1821 if (!ref) {
1822 binder_proc_unlock(proc);
1823 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1824 if (!new_ref)
1825 return -ENOMEM;
1826 binder_proc_lock(proc);
1827 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1828 }
1829 ret = binder_inc_ref_olocked(ref, strong, target_list);
1830 *rdata = ref->data;
1831 binder_proc_unlock(proc);
1832 if (new_ref && ref != new_ref)
1833
1834
1835
1836
1837 kfree(new_ref);
1838 return ret;
1839}
1840
1841static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1842 struct binder_transaction *t)
1843{
1844 BUG_ON(!target_thread);
1845 assert_spin_locked(&target_thread->proc->inner_lock);
1846 BUG_ON(target_thread->transaction_stack != t);
1847 BUG_ON(target_thread->transaction_stack->from != target_thread);
1848 target_thread->transaction_stack =
1849 target_thread->transaction_stack->from_parent;
1850 t->from = NULL;
1851}
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865static void binder_thread_dec_tmpref(struct binder_thread *thread)
1866{
1867
1868
1869
1870
1871 binder_inner_proc_lock(thread->proc);
1872 atomic_dec(&thread->tmp_ref);
1873 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1874 binder_inner_proc_unlock(thread->proc);
1875 binder_free_thread(thread);
1876 return;
1877 }
1878 binder_inner_proc_unlock(thread->proc);
1879}
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893static void binder_proc_dec_tmpref(struct binder_proc *proc)
1894{
1895 binder_inner_proc_lock(proc);
1896 proc->tmp_ref--;
1897 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1898 !proc->tmp_ref) {
1899 binder_inner_proc_unlock(proc);
1900 binder_free_proc(proc);
1901 return;
1902 }
1903 binder_inner_proc_unlock(proc);
1904}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916static struct binder_thread *binder_get_txn_from(
1917 struct binder_transaction *t)
1918{
1919 struct binder_thread *from;
1920
1921 spin_lock(&t->lock);
1922 from = t->from;
1923 if (from)
1924 atomic_inc(&from->tmp_ref);
1925 spin_unlock(&t->lock);
1926 return from;
1927}
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940static struct binder_thread *binder_get_txn_from_and_acq_inner(
1941 struct binder_transaction *t)
1942{
1943 struct binder_thread *from;
1944
1945 from = binder_get_txn_from(t);
1946 if (!from)
1947 return NULL;
1948 binder_inner_proc_lock(from->proc);
1949 if (t->from) {
1950 BUG_ON(from != t->from);
1951 return from;
1952 }
1953 binder_inner_proc_unlock(from->proc);
1954 binder_thread_dec_tmpref(from);
1955 return NULL;
1956}
1957
1958static void binder_free_transaction(struct binder_transaction *t)
1959{
1960 if (t->buffer)
1961 t->buffer->transaction = NULL;
1962 kfree(t);
1963 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1964}
1965
1966static void binder_send_failed_reply(struct binder_transaction *t,
1967 uint32_t error_code)
1968{
1969 struct binder_thread *target_thread;
1970 struct binder_transaction *next;
1971
1972 BUG_ON(t->flags & TF_ONE_WAY);
1973 while (1) {
1974 target_thread = binder_get_txn_from_and_acq_inner(t);
1975 if (target_thread) {
1976 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1977 "send failed reply for transaction %d to %d:%d\n",
1978 t->debug_id,
1979 target_thread->proc->pid,
1980 target_thread->pid);
1981
1982 binder_pop_transaction_ilocked(target_thread, t);
1983 if (target_thread->reply_error.cmd == BR_OK) {
1984 target_thread->reply_error.cmd = error_code;
1985 binder_enqueue_thread_work_ilocked(
1986 target_thread,
1987 &target_thread->reply_error.work);
1988 wake_up_interruptible(&target_thread->wait);
1989 } else {
1990
1991
1992
1993
1994
1995
1996 pr_warn("Unexpected reply error: %u\n",
1997 target_thread->reply_error.cmd);
1998 }
1999 binder_inner_proc_unlock(target_thread->proc);
2000 binder_thread_dec_tmpref(target_thread);
2001 binder_free_transaction(t);
2002 return;
2003 }
2004 next = t->from_parent;
2005
2006 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2007 "send failed reply for transaction %d, target dead\n",
2008 t->debug_id);
2009
2010 binder_free_transaction(t);
2011 if (next == NULL) {
2012 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2013 "reply failed, no target thread at root\n");
2014 return;
2015 }
2016 t = next;
2017 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2018 "reply failed, no target thread -- retry %d\n",
2019 t->debug_id);
2020 }
2021}
2022
2023
2024
2025
2026
2027
2028
2029static void binder_cleanup_transaction(struct binder_transaction *t,
2030 const char *reason,
2031 uint32_t error_code)
2032{
2033 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2034 binder_send_failed_reply(t, error_code);
2035 } else {
2036 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2037 "undelivered transaction %d, %s\n",
2038 t->debug_id, reason);
2039 binder_free_transaction(t);
2040 }
2041}
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2052{
2053
2054 struct binder_object_header *hdr;
2055 size_t object_size = 0;
2056
2057 if (buffer->data_size < sizeof(*hdr) ||
2058 offset > buffer->data_size - sizeof(*hdr) ||
2059 !IS_ALIGNED(offset, sizeof(u32)))
2060 return 0;
2061
2062
2063 hdr = (struct binder_object_header *)(buffer->data + offset);
2064 switch (hdr->type) {
2065 case BINDER_TYPE_BINDER:
2066 case BINDER_TYPE_WEAK_BINDER:
2067 case BINDER_TYPE_HANDLE:
2068 case BINDER_TYPE_WEAK_HANDLE:
2069 object_size = sizeof(struct flat_binder_object);
2070 break;
2071 case BINDER_TYPE_FD:
2072 object_size = sizeof(struct binder_fd_object);
2073 break;
2074 case BINDER_TYPE_PTR:
2075 object_size = sizeof(struct binder_buffer_object);
2076 break;
2077 case BINDER_TYPE_FDA:
2078 object_size = sizeof(struct binder_fd_array_object);
2079 break;
2080 default:
2081 return 0;
2082 }
2083 if (offset <= buffer->data_size - object_size &&
2084 buffer->data_size >= object_size)
2085 return object_size;
2086 else
2087 return 0;
2088}
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2108 binder_size_t index,
2109 binder_size_t *start,
2110 binder_size_t num_valid)
2111{
2112 struct binder_buffer_object *buffer_obj;
2113 binder_size_t *offp;
2114
2115 if (index >= num_valid)
2116 return NULL;
2117
2118 offp = start + index;
2119 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2120 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2121 return NULL;
2122
2123 return buffer_obj;
2124}
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164static bool binder_validate_fixup(struct binder_buffer *b,
2165 binder_size_t *objects_start,
2166 struct binder_buffer_object *buffer,
2167 binder_size_t fixup_offset,
2168 struct binder_buffer_object *last_obj,
2169 binder_size_t last_min_offset)
2170{
2171 if (!last_obj) {
2172
2173 return false;
2174 }
2175
2176 while (last_obj != buffer) {
2177
2178
2179
2180
2181 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2182 return false;
2183 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2184 last_obj = (struct binder_buffer_object *)
2185 (b->data + *(objects_start + last_obj->parent));
2186 }
2187 return (fixup_offset >= last_min_offset);
2188}
2189
2190static void binder_transaction_buffer_release(struct binder_proc *proc,
2191 struct binder_buffer *buffer,
2192 binder_size_t *failed_at)
2193{
2194 binder_size_t *offp, *off_start, *off_end;
2195 int debug_id = buffer->debug_id;
2196
2197 binder_debug(BINDER_DEBUG_TRANSACTION,
2198 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2199 proc->pid, buffer->debug_id,
2200 buffer->data_size, buffer->offsets_size, failed_at);
2201
2202 if (buffer->target_node)
2203 binder_dec_node(buffer->target_node, 1, 0);
2204
2205 off_start = (binder_size_t *)(buffer->data +
2206 ALIGN(buffer->data_size, sizeof(void *)));
2207 if (failed_at)
2208 off_end = failed_at;
2209 else
2210 off_end = (void *)off_start + buffer->offsets_size;
2211 for (offp = off_start; offp < off_end; offp++) {
2212 struct binder_object_header *hdr;
2213 size_t object_size = binder_validate_object(buffer, *offp);
2214
2215 if (object_size == 0) {
2216 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2217 debug_id, (u64)*offp, buffer->data_size);
2218 continue;
2219 }
2220 hdr = (struct binder_object_header *)(buffer->data + *offp);
2221 switch (hdr->type) {
2222 case BINDER_TYPE_BINDER:
2223 case BINDER_TYPE_WEAK_BINDER: {
2224 struct flat_binder_object *fp;
2225 struct binder_node *node;
2226
2227 fp = to_flat_binder_object(hdr);
2228 node = binder_get_node(proc, fp->binder);
2229 if (node == NULL) {
2230 pr_err("transaction release %d bad node %016llx\n",
2231 debug_id, (u64)fp->binder);
2232 break;
2233 }
2234 binder_debug(BINDER_DEBUG_TRANSACTION,
2235 " node %d u%016llx\n",
2236 node->debug_id, (u64)node->ptr);
2237 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2238 0);
2239 binder_put_node(node);
2240 } break;
2241 case BINDER_TYPE_HANDLE:
2242 case BINDER_TYPE_WEAK_HANDLE: {
2243 struct flat_binder_object *fp;
2244 struct binder_ref_data rdata;
2245 int ret;
2246
2247 fp = to_flat_binder_object(hdr);
2248 ret = binder_dec_ref_for_handle(proc, fp->handle,
2249 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2250
2251 if (ret) {
2252 pr_err("transaction release %d bad handle %d, ret = %d\n",
2253 debug_id, fp->handle, ret);
2254 break;
2255 }
2256 binder_debug(BINDER_DEBUG_TRANSACTION,
2257 " ref %d desc %d\n",
2258 rdata.debug_id, rdata.desc);
2259 } break;
2260
2261 case BINDER_TYPE_FD: {
2262 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2263
2264 binder_debug(BINDER_DEBUG_TRANSACTION,
2265 " fd %d\n", fp->fd);
2266 if (failed_at)
2267 task_close_fd(proc, fp->fd);
2268 } break;
2269 case BINDER_TYPE_PTR:
2270
2271
2272
2273
2274 break;
2275 case BINDER_TYPE_FDA: {
2276 struct binder_fd_array_object *fda;
2277 struct binder_buffer_object *parent;
2278 uintptr_t parent_buffer;
2279 u32 *fd_array;
2280 size_t fd_index;
2281 binder_size_t fd_buf_size;
2282
2283 fda = to_binder_fd_array_object(hdr);
2284 parent = binder_validate_ptr(buffer, fda->parent,
2285 off_start,
2286 offp - off_start);
2287 if (!parent) {
2288 pr_err("transaction release %d bad parent offset\n",
2289 debug_id);
2290 continue;
2291 }
2292
2293
2294
2295
2296 parent_buffer = parent->buffer -
2297 binder_alloc_get_user_buffer_offset(
2298 &proc->alloc);
2299
2300 fd_buf_size = sizeof(u32) * fda->num_fds;
2301 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2302 pr_err("transaction release %d invalid number of fds (%lld)\n",
2303 debug_id, (u64)fda->num_fds);
2304 continue;
2305 }
2306 if (fd_buf_size > parent->length ||
2307 fda->parent_offset > parent->length - fd_buf_size) {
2308
2309 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2310 debug_id, (u64)fda->num_fds);
2311 continue;
2312 }
2313 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2314 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2315 task_close_fd(proc, fd_array[fd_index]);
2316 } break;
2317 default:
2318 pr_err("transaction release %d bad object type %x\n",
2319 debug_id, hdr->type);
2320 break;
2321 }
2322 }
2323}
2324
2325static int binder_translate_binder(struct flat_binder_object *fp,
2326 struct binder_transaction *t,
2327 struct binder_thread *thread)
2328{
2329 struct binder_node *node;
2330 struct binder_proc *proc = thread->proc;
2331 struct binder_proc *target_proc = t->to_proc;
2332 struct binder_ref_data rdata;
2333 int ret = 0;
2334
2335 node = binder_get_node(proc, fp->binder);
2336 if (!node) {
2337 node = binder_new_node(proc, fp);
2338 if (!node)
2339 return -ENOMEM;
2340 }
2341 if (fp->cookie != node->cookie) {
2342 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2343 proc->pid, thread->pid, (u64)fp->binder,
2344 node->debug_id, (u64)fp->cookie,
2345 (u64)node->cookie);
2346 ret = -EINVAL;
2347 goto done;
2348 }
2349 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2350 ret = -EPERM;
2351 goto done;
2352 }
2353
2354 ret = binder_inc_ref_for_node(target_proc, node,
2355 fp->hdr.type == BINDER_TYPE_BINDER,
2356 &thread->todo, &rdata);
2357 if (ret)
2358 goto done;
2359
2360 if (fp->hdr.type == BINDER_TYPE_BINDER)
2361 fp->hdr.type = BINDER_TYPE_HANDLE;
2362 else
2363 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2364 fp->binder = 0;
2365 fp->handle = rdata.desc;
2366 fp->cookie = 0;
2367
2368 trace_binder_transaction_node_to_ref(t, node, &rdata);
2369 binder_debug(BINDER_DEBUG_TRANSACTION,
2370 " node %d u%016llx -> ref %d desc %d\n",
2371 node->debug_id, (u64)node->ptr,
2372 rdata.debug_id, rdata.desc);
2373done:
2374 binder_put_node(node);
2375 return ret;
2376}
2377
2378static int binder_translate_handle(struct flat_binder_object *fp,
2379 struct binder_transaction *t,
2380 struct binder_thread *thread)
2381{
2382 struct binder_proc *proc = thread->proc;
2383 struct binder_proc *target_proc = t->to_proc;
2384 struct binder_node *node;
2385 struct binder_ref_data src_rdata;
2386 int ret = 0;
2387
2388 node = binder_get_node_from_ref(proc, fp->handle,
2389 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2390 if (!node) {
2391 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2392 proc->pid, thread->pid, fp->handle);
2393 return -EINVAL;
2394 }
2395 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2396 ret = -EPERM;
2397 goto done;
2398 }
2399
2400 binder_node_lock(node);
2401 if (node->proc == target_proc) {
2402 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2403 fp->hdr.type = BINDER_TYPE_BINDER;
2404 else
2405 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2406 fp->binder = node->ptr;
2407 fp->cookie = node->cookie;
2408 if (node->proc)
2409 binder_inner_proc_lock(node->proc);
2410 binder_inc_node_nilocked(node,
2411 fp->hdr.type == BINDER_TYPE_BINDER,
2412 0, NULL);
2413 if (node->proc)
2414 binder_inner_proc_unlock(node->proc);
2415 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2416 binder_debug(BINDER_DEBUG_TRANSACTION,
2417 " ref %d desc %d -> node %d u%016llx\n",
2418 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2419 (u64)node->ptr);
2420 binder_node_unlock(node);
2421 } else {
2422 struct binder_ref_data dest_rdata;
2423
2424 binder_node_unlock(node);
2425 ret = binder_inc_ref_for_node(target_proc, node,
2426 fp->hdr.type == BINDER_TYPE_HANDLE,
2427 NULL, &dest_rdata);
2428 if (ret)
2429 goto done;
2430
2431 fp->binder = 0;
2432 fp->handle = dest_rdata.desc;
2433 fp->cookie = 0;
2434 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2435 &dest_rdata);
2436 binder_debug(BINDER_DEBUG_TRANSACTION,
2437 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2438 src_rdata.debug_id, src_rdata.desc,
2439 dest_rdata.debug_id, dest_rdata.desc,
2440 node->debug_id);
2441 }
2442done:
2443 binder_put_node(node);
2444 return ret;
2445}
2446
2447static int binder_translate_fd(int fd,
2448 struct binder_transaction *t,
2449 struct binder_thread *thread,
2450 struct binder_transaction *in_reply_to)
2451{
2452 struct binder_proc *proc = thread->proc;
2453 struct binder_proc *target_proc = t->to_proc;
2454 int target_fd;
2455 struct file *file;
2456 int ret;
2457 bool target_allows_fd;
2458
2459 if (in_reply_to)
2460 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2461 else
2462 target_allows_fd = t->buffer->target_node->accept_fds;
2463 if (!target_allows_fd) {
2464 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2465 proc->pid, thread->pid,
2466 in_reply_to ? "reply" : "transaction",
2467 fd);
2468 ret = -EPERM;
2469 goto err_fd_not_accepted;
2470 }
2471
2472 file = fget(fd);
2473 if (!file) {
2474 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2475 proc->pid, thread->pid, fd);
2476 ret = -EBADF;
2477 goto err_fget;
2478 }
2479 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2480 if (ret < 0) {
2481 ret = -EPERM;
2482 goto err_security;
2483 }
2484
2485 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2486 if (target_fd < 0) {
2487 ret = -ENOMEM;
2488 goto err_get_unused_fd;
2489 }
2490 task_fd_install(target_proc, target_fd, file);
2491 trace_binder_transaction_fd(t, fd, target_fd);
2492 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2493 fd, target_fd);
2494
2495 return target_fd;
2496
2497err_get_unused_fd:
2498err_security:
2499 fput(file);
2500err_fget:
2501err_fd_not_accepted:
2502 return ret;
2503}
2504
2505static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2506 struct binder_buffer_object *parent,
2507 struct binder_transaction *t,
2508 struct binder_thread *thread,
2509 struct binder_transaction *in_reply_to)
2510{
2511 binder_size_t fdi, fd_buf_size, num_installed_fds;
2512 int target_fd;
2513 uintptr_t parent_buffer;
2514 u32 *fd_array;
2515 struct binder_proc *proc = thread->proc;
2516 struct binder_proc *target_proc = t->to_proc;
2517
2518 fd_buf_size = sizeof(u32) * fda->num_fds;
2519 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2520 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2521 proc->pid, thread->pid, (u64)fda->num_fds);
2522 return -EINVAL;
2523 }
2524 if (fd_buf_size > parent->length ||
2525 fda->parent_offset > parent->length - fd_buf_size) {
2526
2527 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2528 proc->pid, thread->pid, (u64)fda->num_fds);
2529 return -EINVAL;
2530 }
2531
2532
2533
2534
2535 parent_buffer = parent->buffer -
2536 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2537 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2538 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2539 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2540 proc->pid, thread->pid);
2541 return -EINVAL;
2542 }
2543 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2544 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2545 in_reply_to);
2546 if (target_fd < 0)
2547 goto err_translate_fd_failed;
2548 fd_array[fdi] = target_fd;
2549 }
2550 return 0;
2551
2552err_translate_fd_failed:
2553
2554
2555
2556
2557 num_installed_fds = fdi;
2558 for (fdi = 0; fdi < num_installed_fds; fdi++)
2559 task_close_fd(target_proc, fd_array[fdi]);
2560 return target_fd;
2561}
2562
2563static int binder_fixup_parent(struct binder_transaction *t,
2564 struct binder_thread *thread,
2565 struct binder_buffer_object *bp,
2566 binder_size_t *off_start,
2567 binder_size_t num_valid,
2568 struct binder_buffer_object *last_fixup_obj,
2569 binder_size_t last_fixup_min_off)
2570{
2571 struct binder_buffer_object *parent;
2572 u8 *parent_buffer;
2573 struct binder_buffer *b = t->buffer;
2574 struct binder_proc *proc = thread->proc;
2575 struct binder_proc *target_proc = t->to_proc;
2576
2577 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2578 return 0;
2579
2580 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2581 if (!parent) {
2582 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2583 proc->pid, thread->pid);
2584 return -EINVAL;
2585 }
2586
2587 if (!binder_validate_fixup(b, off_start,
2588 parent, bp->parent_offset,
2589 last_fixup_obj,
2590 last_fixup_min_off)) {
2591 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2592 proc->pid, thread->pid);
2593 return -EINVAL;
2594 }
2595
2596 if (parent->length < sizeof(binder_uintptr_t) ||
2597 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2598
2599 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2600 proc->pid, thread->pid);
2601 return -EINVAL;
2602 }
2603 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2604 binder_alloc_get_user_buffer_offset(
2605 &target_proc->alloc));
2606 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2607
2608 return 0;
2609}
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628static bool binder_proc_transaction(struct binder_transaction *t,
2629 struct binder_proc *proc,
2630 struct binder_thread *thread)
2631{
2632 struct binder_node *node = t->buffer->target_node;
2633 bool oneway = !!(t->flags & TF_ONE_WAY);
2634 bool pending_async = false;
2635
2636 BUG_ON(!node);
2637 binder_node_lock(node);
2638 if (oneway) {
2639 BUG_ON(thread);
2640 if (node->has_async_transaction) {
2641 pending_async = true;
2642 } else {
2643 node->has_async_transaction = true;
2644 }
2645 }
2646
2647 binder_inner_proc_lock(proc);
2648
2649 if (proc->is_dead || (thread && thread->is_dead)) {
2650 binder_inner_proc_unlock(proc);
2651 binder_node_unlock(node);
2652 return false;
2653 }
2654
2655 if (!thread && !pending_async)
2656 thread = binder_select_thread_ilocked(proc);
2657
2658 if (thread)
2659 binder_enqueue_thread_work_ilocked(thread, &t->work);
2660 else if (!pending_async)
2661 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2662 else
2663 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2664
2665 if (!pending_async)
2666 binder_wakeup_thread_ilocked(proc, thread, !oneway );
2667
2668 binder_inner_proc_unlock(proc);
2669 binder_node_unlock(node);
2670
2671 return true;
2672}
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695static struct binder_node *binder_get_node_refs_for_txn(
2696 struct binder_node *node,
2697 struct binder_proc **procp,
2698 uint32_t *error)
2699{
2700 struct binder_node *target_node = NULL;
2701
2702 binder_node_inner_lock(node);
2703 if (node->proc) {
2704 target_node = node;
2705 binder_inc_node_nilocked(node, 1, 0, NULL);
2706 binder_inc_node_tmpref_ilocked(node);
2707 node->proc->tmp_ref++;
2708 *procp = node->proc;
2709 } else
2710 *error = BR_DEAD_REPLY;
2711 binder_node_inner_unlock(node);
2712
2713 return target_node;
2714}
2715
2716static void binder_transaction(struct binder_proc *proc,
2717 struct binder_thread *thread,
2718 struct binder_transaction_data *tr, int reply,
2719 binder_size_t extra_buffers_size)
2720{
2721 int ret;
2722 struct binder_transaction *t;
2723 struct binder_work *tcomplete;
2724 binder_size_t *offp, *off_end, *off_start;
2725 binder_size_t off_min;
2726 u8 *sg_bufp, *sg_buf_end;
2727 struct binder_proc *target_proc = NULL;
2728 struct binder_thread *target_thread = NULL;
2729 struct binder_node *target_node = NULL;
2730 struct binder_transaction *in_reply_to = NULL;
2731 struct binder_transaction_log_entry *e;
2732 uint32_t return_error = 0;
2733 uint32_t return_error_param = 0;
2734 uint32_t return_error_line = 0;
2735 struct binder_buffer_object *last_fixup_obj = NULL;
2736 binder_size_t last_fixup_min_off = 0;
2737 struct binder_context *context = proc->context;
2738 int t_debug_id = atomic_inc_return(&binder_last_id);
2739
2740 e = binder_transaction_log_add(&binder_transaction_log);
2741 e->debug_id = t_debug_id;
2742 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2743 e->from_proc = proc->pid;
2744 e->from_thread = thread->pid;
2745 e->target_handle = tr->target.handle;
2746 e->data_size = tr->data_size;
2747 e->offsets_size = tr->offsets_size;
2748 e->context_name = proc->context->name;
2749
2750 if (reply) {
2751 binder_inner_proc_lock(proc);
2752 in_reply_to = thread->transaction_stack;
2753 if (in_reply_to == NULL) {
2754 binder_inner_proc_unlock(proc);
2755 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2756 proc->pid, thread->pid);
2757 return_error = BR_FAILED_REPLY;
2758 return_error_param = -EPROTO;
2759 return_error_line = __LINE__;
2760 goto err_empty_call_stack;
2761 }
2762 if (in_reply_to->to_thread != thread) {
2763 spin_lock(&in_reply_to->lock);
2764 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2765 proc->pid, thread->pid, in_reply_to->debug_id,
2766 in_reply_to->to_proc ?
2767 in_reply_to->to_proc->pid : 0,
2768 in_reply_to->to_thread ?
2769 in_reply_to->to_thread->pid : 0);
2770 spin_unlock(&in_reply_to->lock);
2771 binder_inner_proc_unlock(proc);
2772 return_error = BR_FAILED_REPLY;
2773 return_error_param = -EPROTO;
2774 return_error_line = __LINE__;
2775 in_reply_to = NULL;
2776 goto err_bad_call_stack;
2777 }
2778 thread->transaction_stack = in_reply_to->to_parent;
2779 binder_inner_proc_unlock(proc);
2780 binder_set_nice(in_reply_to->saved_priority);
2781 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2782 if (target_thread == NULL) {
2783 return_error = BR_DEAD_REPLY;
2784 return_error_line = __LINE__;
2785 goto err_dead_binder;
2786 }
2787 if (target_thread->transaction_stack != in_reply_to) {
2788 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2789 proc->pid, thread->pid,
2790 target_thread->transaction_stack ?
2791 target_thread->transaction_stack->debug_id : 0,
2792 in_reply_to->debug_id);
2793 binder_inner_proc_unlock(target_thread->proc);
2794 return_error = BR_FAILED_REPLY;
2795 return_error_param = -EPROTO;
2796 return_error_line = __LINE__;
2797 in_reply_to = NULL;
2798 target_thread = NULL;
2799 goto err_dead_binder;
2800 }
2801 target_proc = target_thread->proc;
2802 target_proc->tmp_ref++;
2803 binder_inner_proc_unlock(target_thread->proc);
2804 } else {
2805 if (tr->target.handle) {
2806 struct binder_ref *ref;
2807
2808
2809
2810
2811
2812
2813
2814
2815 binder_proc_lock(proc);
2816 ref = binder_get_ref_olocked(proc, tr->target.handle,
2817 true);
2818 if (ref) {
2819 target_node = binder_get_node_refs_for_txn(
2820 ref->node, &target_proc,
2821 &return_error);
2822 } else {
2823 binder_user_error("%d:%d got transaction to invalid handle\n",
2824 proc->pid, thread->pid);
2825 return_error = BR_FAILED_REPLY;
2826 }
2827 binder_proc_unlock(proc);
2828 } else {
2829 mutex_lock(&context->context_mgr_node_lock);
2830 target_node = context->binder_context_mgr_node;
2831 if (target_node)
2832 target_node = binder_get_node_refs_for_txn(
2833 target_node, &target_proc,
2834 &return_error);
2835 else
2836 return_error = BR_DEAD_REPLY;
2837 mutex_unlock(&context->context_mgr_node_lock);
2838 if (target_node && target_proc == proc) {
2839 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2840 proc->pid, thread->pid);
2841 return_error = BR_FAILED_REPLY;
2842 return_error_param = -EINVAL;
2843 return_error_line = __LINE__;
2844 goto err_invalid_target_handle;
2845 }
2846 }
2847 if (!target_node) {
2848
2849
2850
2851 return_error_param = -EINVAL;
2852 return_error_line = __LINE__;
2853 goto err_dead_binder;
2854 }
2855 e->to_node = target_node->debug_id;
2856 if (security_binder_transaction(proc->tsk,
2857 target_proc->tsk) < 0) {
2858 return_error = BR_FAILED_REPLY;
2859 return_error_param = -EPERM;
2860 return_error_line = __LINE__;
2861 goto err_invalid_target_handle;
2862 }
2863 binder_inner_proc_lock(proc);
2864 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2865 struct binder_transaction *tmp;
2866
2867 tmp = thread->transaction_stack;
2868 if (tmp->to_thread != thread) {
2869 spin_lock(&tmp->lock);
2870 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2871 proc->pid, thread->pid, tmp->debug_id,
2872 tmp->to_proc ? tmp->to_proc->pid : 0,
2873 tmp->to_thread ?
2874 tmp->to_thread->pid : 0);
2875 spin_unlock(&tmp->lock);
2876 binder_inner_proc_unlock(proc);
2877 return_error = BR_FAILED_REPLY;
2878 return_error_param = -EPROTO;
2879 return_error_line = __LINE__;
2880 goto err_bad_call_stack;
2881 }
2882 while (tmp) {
2883 struct binder_thread *from;
2884
2885 spin_lock(&tmp->lock);
2886 from = tmp->from;
2887 if (from && from->proc == target_proc) {
2888 atomic_inc(&from->tmp_ref);
2889 target_thread = from;
2890 spin_unlock(&tmp->lock);
2891 break;
2892 }
2893 spin_unlock(&tmp->lock);
2894 tmp = tmp->from_parent;
2895 }
2896 }
2897 binder_inner_proc_unlock(proc);
2898 }
2899 if (target_thread)
2900 e->to_thread = target_thread->pid;
2901 e->to_proc = target_proc->pid;
2902
2903
2904 t = kzalloc(sizeof(*t), GFP_KERNEL);
2905 if (t == NULL) {
2906 return_error = BR_FAILED_REPLY;
2907 return_error_param = -ENOMEM;
2908 return_error_line = __LINE__;
2909 goto err_alloc_t_failed;
2910 }
2911 binder_stats_created(BINDER_STAT_TRANSACTION);
2912 spin_lock_init(&t->lock);
2913
2914 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2915 if (tcomplete == NULL) {
2916 return_error = BR_FAILED_REPLY;
2917 return_error_param = -ENOMEM;
2918 return_error_line = __LINE__;
2919 goto err_alloc_tcomplete_failed;
2920 }
2921 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2922
2923 t->debug_id = t_debug_id;
2924
2925 if (reply)
2926 binder_debug(BINDER_DEBUG_TRANSACTION,
2927 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2928 proc->pid, thread->pid, t->debug_id,
2929 target_proc->pid, target_thread->pid,
2930 (u64)tr->data.ptr.buffer,
2931 (u64)tr->data.ptr.offsets,
2932 (u64)tr->data_size, (u64)tr->offsets_size,
2933 (u64)extra_buffers_size);
2934 else
2935 binder_debug(BINDER_DEBUG_TRANSACTION,
2936 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2937 proc->pid, thread->pid, t->debug_id,
2938 target_proc->pid, target_node->debug_id,
2939 (u64)tr->data.ptr.buffer,
2940 (u64)tr->data.ptr.offsets,
2941 (u64)tr->data_size, (u64)tr->offsets_size,
2942 (u64)extra_buffers_size);
2943
2944 if (!reply && !(tr->flags & TF_ONE_WAY))
2945 t->from = thread;
2946 else
2947 t->from = NULL;
2948 t->sender_euid = task_euid(proc->tsk);
2949 t->to_proc = target_proc;
2950 t->to_thread = target_thread;
2951 t->code = tr->code;
2952 t->flags = tr->flags;
2953 t->priority = task_nice(current);
2954
2955 trace_binder_transaction(reply, t, target_node);
2956
2957 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2958 tr->offsets_size, extra_buffers_size,
2959 !reply && (t->flags & TF_ONE_WAY));
2960 if (IS_ERR(t->buffer)) {
2961
2962
2963
2964 return_error_param = PTR_ERR(t->buffer);
2965 return_error = return_error_param == -ESRCH ?
2966 BR_DEAD_REPLY : BR_FAILED_REPLY;
2967 return_error_line = __LINE__;
2968 t->buffer = NULL;
2969 goto err_binder_alloc_buf_failed;
2970 }
2971 t->buffer->allow_user_free = 0;
2972 t->buffer->debug_id = t->debug_id;
2973 t->buffer->transaction = t;
2974 t->buffer->target_node = target_node;
2975 trace_binder_transaction_alloc_buf(t->buffer);
2976 off_start = (binder_size_t *)(t->buffer->data +
2977 ALIGN(tr->data_size, sizeof(void *)));
2978 offp = off_start;
2979
2980 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2981 tr->data.ptr.buffer, tr->data_size)) {
2982 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2983 proc->pid, thread->pid);
2984 return_error = BR_FAILED_REPLY;
2985 return_error_param = -EFAULT;
2986 return_error_line = __LINE__;
2987 goto err_copy_data_failed;
2988 }
2989 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2990 tr->data.ptr.offsets, tr->offsets_size)) {
2991 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2992 proc->pid, thread->pid);
2993 return_error = BR_FAILED_REPLY;
2994 return_error_param = -EFAULT;
2995 return_error_line = __LINE__;
2996 goto err_copy_data_failed;
2997 }
2998 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2999 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3000 proc->pid, thread->pid, (u64)tr->offsets_size);
3001 return_error = BR_FAILED_REPLY;
3002 return_error_param = -EINVAL;
3003 return_error_line = __LINE__;
3004 goto err_bad_offset;
3005 }
3006 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3007 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3008 proc->pid, thread->pid,
3009 (u64)extra_buffers_size);
3010 return_error = BR_FAILED_REPLY;
3011 return_error_param = -EINVAL;
3012 return_error_line = __LINE__;
3013 goto err_bad_offset;
3014 }
3015 off_end = (void *)off_start + tr->offsets_size;
3016 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3017 sg_buf_end = sg_bufp + extra_buffers_size;
3018 off_min = 0;
3019 for (; offp < off_end; offp++) {
3020 struct binder_object_header *hdr;
3021 size_t object_size = binder_validate_object(t->buffer, *offp);
3022
3023 if (object_size == 0 || *offp < off_min) {
3024 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3025 proc->pid, thread->pid, (u64)*offp,
3026 (u64)off_min,
3027 (u64)t->buffer->data_size);
3028 return_error = BR_FAILED_REPLY;
3029 return_error_param = -EINVAL;
3030 return_error_line = __LINE__;
3031 goto err_bad_offset;
3032 }
3033
3034 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3035 off_min = *offp + object_size;
3036 switch (hdr->type) {
3037 case BINDER_TYPE_BINDER:
3038 case BINDER_TYPE_WEAK_BINDER: {
3039 struct flat_binder_object *fp;
3040
3041 fp = to_flat_binder_object(hdr);
3042 ret = binder_translate_binder(fp, t, thread);
3043 if (ret < 0) {
3044 return_error = BR_FAILED_REPLY;
3045 return_error_param = ret;
3046 return_error_line = __LINE__;
3047 goto err_translate_failed;
3048 }
3049 } break;
3050 case BINDER_TYPE_HANDLE:
3051 case BINDER_TYPE_WEAK_HANDLE: {
3052 struct flat_binder_object *fp;
3053
3054 fp = to_flat_binder_object(hdr);
3055 ret = binder_translate_handle(fp, t, thread);
3056 if (ret < 0) {
3057 return_error = BR_FAILED_REPLY;
3058 return_error_param = ret;
3059 return_error_line = __LINE__;
3060 goto err_translate_failed;
3061 }
3062 } break;
3063
3064 case BINDER_TYPE_FD: {
3065 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3066 int target_fd = binder_translate_fd(fp->fd, t, thread,
3067 in_reply_to);
3068
3069 if (target_fd < 0) {
3070 return_error = BR_FAILED_REPLY;
3071 return_error_param = target_fd;
3072 return_error_line = __LINE__;
3073 goto err_translate_failed;
3074 }
3075 fp->pad_binder = 0;
3076 fp->fd = target_fd;
3077 } break;
3078 case BINDER_TYPE_FDA: {
3079 struct binder_fd_array_object *fda =
3080 to_binder_fd_array_object(hdr);
3081 struct binder_buffer_object *parent =
3082 binder_validate_ptr(t->buffer, fda->parent,
3083 off_start,
3084 offp - off_start);
3085 if (!parent) {
3086 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3087 proc->pid, thread->pid);
3088 return_error = BR_FAILED_REPLY;
3089 return_error_param = -EINVAL;
3090 return_error_line = __LINE__;
3091 goto err_bad_parent;
3092 }
3093 if (!binder_validate_fixup(t->buffer, off_start,
3094 parent, fda->parent_offset,
3095 last_fixup_obj,
3096 last_fixup_min_off)) {
3097 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3098 proc->pid, thread->pid);
3099 return_error = BR_FAILED_REPLY;
3100 return_error_param = -EINVAL;
3101 return_error_line = __LINE__;
3102 goto err_bad_parent;
3103 }
3104 ret = binder_translate_fd_array(fda, parent, t, thread,
3105 in_reply_to);
3106 if (ret < 0) {
3107 return_error = BR_FAILED_REPLY;
3108 return_error_param = ret;
3109 return_error_line = __LINE__;
3110 goto err_translate_failed;
3111 }
3112 last_fixup_obj = parent;
3113 last_fixup_min_off =
3114 fda->parent_offset + sizeof(u32) * fda->num_fds;
3115 } break;
3116 case BINDER_TYPE_PTR: {
3117 struct binder_buffer_object *bp =
3118 to_binder_buffer_object(hdr);
3119 size_t buf_left = sg_buf_end - sg_bufp;
3120
3121 if (bp->length > buf_left) {
3122 binder_user_error("%d:%d got transaction with too large buffer\n",
3123 proc->pid, thread->pid);
3124 return_error = BR_FAILED_REPLY;
3125 return_error_param = -EINVAL;
3126 return_error_line = __LINE__;
3127 goto err_bad_offset;
3128 }
3129 if (copy_from_user(sg_bufp,
3130 (const void __user *)(uintptr_t)
3131 bp->buffer, bp->length)) {
3132 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3133 proc->pid, thread->pid);
3134 return_error_param = -EFAULT;
3135 return_error = BR_FAILED_REPLY;
3136 return_error_line = __LINE__;
3137 goto err_copy_data_failed;
3138 }
3139
3140 bp->buffer = (uintptr_t)sg_bufp +
3141 binder_alloc_get_user_buffer_offset(
3142 &target_proc->alloc);
3143 sg_bufp += ALIGN(bp->length, sizeof(u64));
3144
3145 ret = binder_fixup_parent(t, thread, bp, off_start,
3146 offp - off_start,
3147 last_fixup_obj,
3148 last_fixup_min_off);
3149 if (ret < 0) {
3150 return_error = BR_FAILED_REPLY;
3151 return_error_param = ret;
3152 return_error_line = __LINE__;
3153 goto err_translate_failed;
3154 }
3155 last_fixup_obj = bp;
3156 last_fixup_min_off = 0;
3157 } break;
3158 default:
3159 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3160 proc->pid, thread->pid, hdr->type);
3161 return_error = BR_FAILED_REPLY;
3162 return_error_param = -EINVAL;
3163 return_error_line = __LINE__;
3164 goto err_bad_object_type;
3165 }
3166 }
3167 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3168 t->work.type = BINDER_WORK_TRANSACTION;
3169
3170 if (reply) {
3171 binder_enqueue_thread_work(thread, tcomplete);
3172 binder_inner_proc_lock(target_proc);
3173 if (target_thread->is_dead) {
3174 binder_inner_proc_unlock(target_proc);
3175 goto err_dead_proc_or_thread;
3176 }
3177 BUG_ON(t->buffer->async_transaction != 0);
3178 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3179 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3180 binder_inner_proc_unlock(target_proc);
3181 wake_up_interruptible_sync(&target_thread->wait);
3182 binder_free_transaction(in_reply_to);
3183 } else if (!(t->flags & TF_ONE_WAY)) {
3184 BUG_ON(t->buffer->async_transaction != 0);
3185 binder_inner_proc_lock(proc);
3186
3187
3188
3189
3190
3191
3192
3193 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3194 t->need_reply = 1;
3195 t->from_parent = thread->transaction_stack;
3196 thread->transaction_stack = t;
3197 binder_inner_proc_unlock(proc);
3198 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3199 binder_inner_proc_lock(proc);
3200 binder_pop_transaction_ilocked(thread, t);
3201 binder_inner_proc_unlock(proc);
3202 goto err_dead_proc_or_thread;
3203 }
3204 } else {
3205 BUG_ON(target_node == NULL);
3206 BUG_ON(t->buffer->async_transaction != 1);
3207 binder_enqueue_thread_work(thread, tcomplete);
3208 if (!binder_proc_transaction(t, target_proc, NULL))
3209 goto err_dead_proc_or_thread;
3210 }
3211 if (target_thread)
3212 binder_thread_dec_tmpref(target_thread);
3213 binder_proc_dec_tmpref(target_proc);
3214 if (target_node)
3215 binder_dec_node_tmpref(target_node);
3216
3217
3218
3219
3220 smp_wmb();
3221 WRITE_ONCE(e->debug_id_done, t_debug_id);
3222 return;
3223
3224err_dead_proc_or_thread:
3225 return_error = BR_DEAD_REPLY;
3226 return_error_line = __LINE__;
3227 binder_dequeue_work(proc, tcomplete);
3228err_translate_failed:
3229err_bad_object_type:
3230err_bad_offset:
3231err_bad_parent:
3232err_copy_data_failed:
3233 trace_binder_transaction_failed_buffer_release(t->buffer);
3234 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3235 if (target_node)
3236 binder_dec_node_tmpref(target_node);
3237 target_node = NULL;
3238 t->buffer->transaction = NULL;
3239 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3240err_binder_alloc_buf_failed:
3241 kfree(tcomplete);
3242 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3243err_alloc_tcomplete_failed:
3244 kfree(t);
3245 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3246err_alloc_t_failed:
3247err_bad_call_stack:
3248err_empty_call_stack:
3249err_dead_binder:
3250err_invalid_target_handle:
3251 if (target_thread)
3252 binder_thread_dec_tmpref(target_thread);
3253 if (target_proc)
3254 binder_proc_dec_tmpref(target_proc);
3255 if (target_node) {
3256 binder_dec_node(target_node, 1, 0);
3257 binder_dec_node_tmpref(target_node);
3258 }
3259
3260 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3261 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3262 proc->pid, thread->pid, return_error, return_error_param,
3263 (u64)tr->data_size, (u64)tr->offsets_size,
3264 return_error_line);
3265
3266 {
3267 struct binder_transaction_log_entry *fe;
3268
3269 e->return_error = return_error;
3270 e->return_error_param = return_error_param;
3271 e->return_error_line = return_error_line;
3272 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3273 *fe = *e;
3274
3275
3276
3277
3278 smp_wmb();
3279 WRITE_ONCE(e->debug_id_done, t_debug_id);
3280 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3281 }
3282
3283 BUG_ON(thread->return_error.cmd != BR_OK);
3284 if (in_reply_to) {
3285 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3286 binder_enqueue_thread_work(thread, &thread->return_error.work);
3287 binder_send_failed_reply(in_reply_to, return_error);
3288 } else {
3289 thread->return_error.cmd = return_error;
3290 binder_enqueue_thread_work(thread, &thread->return_error.work);
3291 }
3292}
3293
3294static int binder_thread_write(struct binder_proc *proc,
3295 struct binder_thread *thread,
3296 binder_uintptr_t binder_buffer, size_t size,
3297 binder_size_t *consumed)
3298{
3299 uint32_t cmd;
3300 struct binder_context *context = proc->context;
3301 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3302 void __user *ptr = buffer + *consumed;
3303 void __user *end = buffer + size;
3304
3305 while (ptr < end && thread->return_error.cmd == BR_OK) {
3306 int ret;
3307
3308 if (get_user(cmd, (uint32_t __user *)ptr))
3309 return -EFAULT;
3310 ptr += sizeof(uint32_t);
3311 trace_binder_command(cmd);
3312 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3313 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3314 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3315 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3316 }
3317 switch (cmd) {
3318 case BC_INCREFS:
3319 case BC_ACQUIRE:
3320 case BC_RELEASE:
3321 case BC_DECREFS: {
3322 uint32_t target;
3323 const char *debug_string;
3324 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3325 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3326 struct binder_ref_data rdata;
3327
3328 if (get_user(target, (uint32_t __user *)ptr))
3329 return -EFAULT;
3330
3331 ptr += sizeof(uint32_t);
3332 ret = -1;
3333 if (increment && !target) {
3334 struct binder_node *ctx_mgr_node;
3335 mutex_lock(&context->context_mgr_node_lock);
3336 ctx_mgr_node = context->binder_context_mgr_node;
3337 if (ctx_mgr_node)
3338 ret = binder_inc_ref_for_node(
3339 proc, ctx_mgr_node,
3340 strong, NULL, &rdata);
3341 mutex_unlock(&context->context_mgr_node_lock);
3342 }
3343 if (ret)
3344 ret = binder_update_ref_for_handle(
3345 proc, target, increment, strong,
3346 &rdata);
3347 if (!ret && rdata.desc != target) {
3348 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3349 proc->pid, thread->pid,
3350 target, rdata.desc);
3351 }
3352 switch (cmd) {
3353 case BC_INCREFS:
3354 debug_string = "IncRefs";
3355 break;
3356 case BC_ACQUIRE:
3357 debug_string = "Acquire";
3358 break;
3359 case BC_RELEASE:
3360 debug_string = "Release";
3361 break;
3362 case BC_DECREFS:
3363 default:
3364 debug_string = "DecRefs";
3365 break;
3366 }
3367 if (ret) {
3368 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3369 proc->pid, thread->pid, debug_string,
3370 strong, target, ret);
3371 break;
3372 }
3373 binder_debug(BINDER_DEBUG_USER_REFS,
3374 "%d:%d %s ref %d desc %d s %d w %d\n",
3375 proc->pid, thread->pid, debug_string,
3376 rdata.debug_id, rdata.desc, rdata.strong,
3377 rdata.weak);
3378 break;
3379 }
3380 case BC_INCREFS_DONE:
3381 case BC_ACQUIRE_DONE: {
3382 binder_uintptr_t node_ptr;
3383 binder_uintptr_t cookie;
3384 struct binder_node *node;
3385 bool free_node;
3386
3387 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3388 return -EFAULT;
3389 ptr += sizeof(binder_uintptr_t);
3390 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3391 return -EFAULT;
3392 ptr += sizeof(binder_uintptr_t);
3393 node = binder_get_node(proc, node_ptr);
3394 if (node == NULL) {
3395 binder_user_error("%d:%d %s u%016llx no match\n",
3396 proc->pid, thread->pid,
3397 cmd == BC_INCREFS_DONE ?
3398 "BC_INCREFS_DONE" :
3399 "BC_ACQUIRE_DONE",
3400 (u64)node_ptr);
3401 break;
3402 }
3403 if (cookie != node->cookie) {
3404 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3405 proc->pid, thread->pid,
3406 cmd == BC_INCREFS_DONE ?
3407 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3408 (u64)node_ptr, node->debug_id,
3409 (u64)cookie, (u64)node->cookie);
3410 binder_put_node(node);
3411 break;
3412 }
3413 binder_node_inner_lock(node);
3414 if (cmd == BC_ACQUIRE_DONE) {
3415 if (node->pending_strong_ref == 0) {
3416 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3417 proc->pid, thread->pid,
3418 node->debug_id);
3419 binder_node_inner_unlock(node);
3420 binder_put_node(node);
3421 break;
3422 }
3423 node->pending_strong_ref = 0;
3424 } else {
3425 if (node->pending_weak_ref == 0) {
3426 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3427 proc->pid, thread->pid,
3428 node->debug_id);
3429 binder_node_inner_unlock(node);
3430 binder_put_node(node);
3431 break;
3432 }
3433 node->pending_weak_ref = 0;
3434 }
3435 free_node = binder_dec_node_nilocked(node,
3436 cmd == BC_ACQUIRE_DONE, 0);
3437 WARN_ON(free_node);
3438 binder_debug(BINDER_DEBUG_USER_REFS,
3439 "%d:%d %s node %d ls %d lw %d tr %d\n",
3440 proc->pid, thread->pid,
3441 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3442 node->debug_id, node->local_strong_refs,
3443 node->local_weak_refs, node->tmp_refs);
3444 binder_node_inner_unlock(node);
3445 binder_put_node(node);
3446 break;
3447 }
3448 case BC_ATTEMPT_ACQUIRE:
3449 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3450 return -EINVAL;
3451 case BC_ACQUIRE_RESULT:
3452 pr_err("BC_ACQUIRE_RESULT not supported\n");
3453 return -EINVAL;
3454
3455 case BC_FREE_BUFFER: {
3456 binder_uintptr_t data_ptr;
3457 struct binder_buffer *buffer;
3458
3459 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3460 return -EFAULT;
3461 ptr += sizeof(binder_uintptr_t);
3462
3463 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3464 data_ptr);
3465 if (buffer == NULL) {
3466 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3467 proc->pid, thread->pid, (u64)data_ptr);
3468 break;
3469 }
3470 if (!buffer->allow_user_free) {
3471 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3472 proc->pid, thread->pid, (u64)data_ptr);
3473 break;
3474 }
3475 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3476 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3477 proc->pid, thread->pid, (u64)data_ptr,
3478 buffer->debug_id,
3479 buffer->transaction ? "active" : "finished");
3480
3481 if (buffer->transaction) {
3482 buffer->transaction->buffer = NULL;
3483 buffer->transaction = NULL;
3484 }
3485 if (buffer->async_transaction && buffer->target_node) {
3486 struct binder_node *buf_node;
3487 struct binder_work *w;
3488
3489 buf_node = buffer->target_node;
3490 binder_node_inner_lock(buf_node);
3491 BUG_ON(!buf_node->has_async_transaction);
3492 BUG_ON(buf_node->proc != proc);
3493 w = binder_dequeue_work_head_ilocked(
3494 &buf_node->async_todo);
3495 if (!w) {
3496 buf_node->has_async_transaction = false;
3497 } else {
3498 binder_enqueue_work_ilocked(
3499 w, &proc->todo);
3500 binder_wakeup_proc_ilocked(proc);
3501 }
3502 binder_node_inner_unlock(buf_node);
3503 }
3504 trace_binder_transaction_buffer_release(buffer);
3505 binder_transaction_buffer_release(proc, buffer, NULL);
3506 binder_alloc_free_buf(&proc->alloc, buffer);
3507 break;
3508 }
3509
3510 case BC_TRANSACTION_SG:
3511 case BC_REPLY_SG: {
3512 struct binder_transaction_data_sg tr;
3513
3514 if (copy_from_user(&tr, ptr, sizeof(tr)))
3515 return -EFAULT;
3516 ptr += sizeof(tr);
3517 binder_transaction(proc, thread, &tr.transaction_data,
3518 cmd == BC_REPLY_SG, tr.buffers_size);
3519 break;
3520 }
3521 case BC_TRANSACTION:
3522 case BC_REPLY: {
3523 struct binder_transaction_data tr;
3524
3525 if (copy_from_user(&tr, ptr, sizeof(tr)))
3526 return -EFAULT;
3527 ptr += sizeof(tr);
3528 binder_transaction(proc, thread, &tr,
3529 cmd == BC_REPLY, 0);
3530 break;
3531 }
3532
3533 case BC_REGISTER_LOOPER:
3534 binder_debug(BINDER_DEBUG_THREADS,
3535 "%d:%d BC_REGISTER_LOOPER\n",
3536 proc->pid, thread->pid);
3537 binder_inner_proc_lock(proc);
3538 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3539 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3540 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3541 proc->pid, thread->pid);
3542 } else if (proc->requested_threads == 0) {
3543 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3544 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3545 proc->pid, thread->pid);
3546 } else {
3547 proc->requested_threads--;
3548 proc->requested_threads_started++;
3549 }
3550 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3551 binder_inner_proc_unlock(proc);
3552 break;
3553 case BC_ENTER_LOOPER:
3554 binder_debug(BINDER_DEBUG_THREADS,
3555 "%d:%d BC_ENTER_LOOPER\n",
3556 proc->pid, thread->pid);
3557 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3558 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3559 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3560 proc->pid, thread->pid);
3561 }
3562 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3563 break;
3564 case BC_EXIT_LOOPER:
3565 binder_debug(BINDER_DEBUG_THREADS,
3566 "%d:%d BC_EXIT_LOOPER\n",
3567 proc->pid, thread->pid);
3568 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3569 break;
3570
3571 case BC_REQUEST_DEATH_NOTIFICATION:
3572 case BC_CLEAR_DEATH_NOTIFICATION: {
3573 uint32_t target;
3574 binder_uintptr_t cookie;
3575 struct binder_ref *ref;
3576 struct binder_ref_death *death = NULL;
3577
3578 if (get_user(target, (uint32_t __user *)ptr))
3579 return -EFAULT;
3580 ptr += sizeof(uint32_t);
3581 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3582 return -EFAULT;
3583 ptr += sizeof(binder_uintptr_t);
3584 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3585
3586
3587
3588
3589 death = kzalloc(sizeof(*death), GFP_KERNEL);
3590 if (death == NULL) {
3591 WARN_ON(thread->return_error.cmd !=
3592 BR_OK);
3593 thread->return_error.cmd = BR_ERROR;
3594 binder_enqueue_thread_work(
3595 thread,
3596 &thread->return_error.work);
3597 binder_debug(
3598 BINDER_DEBUG_FAILED_TRANSACTION,
3599 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3600 proc->pid, thread->pid);
3601 break;
3602 }
3603 }
3604 binder_proc_lock(proc);
3605 ref = binder_get_ref_olocked(proc, target, false);
3606 if (ref == NULL) {
3607 binder_user_error("%d:%d %s invalid ref %d\n",
3608 proc->pid, thread->pid,
3609 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3610 "BC_REQUEST_DEATH_NOTIFICATION" :
3611 "BC_CLEAR_DEATH_NOTIFICATION",
3612 target);
3613 binder_proc_unlock(proc);
3614 kfree(death);
3615 break;
3616 }
3617
3618 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3619 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3620 proc->pid, thread->pid,
3621 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3622 "BC_REQUEST_DEATH_NOTIFICATION" :
3623 "BC_CLEAR_DEATH_NOTIFICATION",
3624 (u64)cookie, ref->data.debug_id,
3625 ref->data.desc, ref->data.strong,
3626 ref->data.weak, ref->node->debug_id);
3627
3628 binder_node_lock(ref->node);
3629 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3630 if (ref->death) {
3631 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3632 proc->pid, thread->pid);
3633 binder_node_unlock(ref->node);
3634 binder_proc_unlock(proc);
3635 kfree(death);
3636 break;
3637 }
3638 binder_stats_created(BINDER_STAT_DEATH);
3639 INIT_LIST_HEAD(&death->work.entry);
3640 death->cookie = cookie;
3641 ref->death = death;
3642 if (ref->node->proc == NULL) {
3643 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3644
3645 binder_inner_proc_lock(proc);
3646 binder_enqueue_work_ilocked(
3647 &ref->death->work, &proc->todo);
3648 binder_wakeup_proc_ilocked(proc);
3649 binder_inner_proc_unlock(proc);
3650 }
3651 } else {
3652 if (ref->death == NULL) {
3653 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3654 proc->pid, thread->pid);
3655 binder_node_unlock(ref->node);
3656 binder_proc_unlock(proc);
3657 break;
3658 }
3659 death = ref->death;
3660 if (death->cookie != cookie) {
3661 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3662 proc->pid, thread->pid,
3663 (u64)death->cookie,
3664 (u64)cookie);
3665 binder_node_unlock(ref->node);
3666 binder_proc_unlock(proc);
3667 break;
3668 }
3669 ref->death = NULL;
3670 binder_inner_proc_lock(proc);
3671 if (list_empty(&death->work.entry)) {
3672 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3673 if (thread->looper &
3674 (BINDER_LOOPER_STATE_REGISTERED |
3675 BINDER_LOOPER_STATE_ENTERED))
3676 binder_enqueue_thread_work_ilocked(
3677 thread,
3678 &death->work);
3679 else {
3680 binder_enqueue_work_ilocked(
3681 &death->work,
3682 &proc->todo);
3683 binder_wakeup_proc_ilocked(
3684 proc);
3685 }
3686 } else {
3687 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3688 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3689 }
3690 binder_inner_proc_unlock(proc);
3691 }
3692 binder_node_unlock(ref->node);
3693 binder_proc_unlock(proc);
3694 } break;
3695 case BC_DEAD_BINDER_DONE: {
3696 struct binder_work *w;
3697 binder_uintptr_t cookie;
3698 struct binder_ref_death *death = NULL;
3699
3700 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3701 return -EFAULT;
3702
3703 ptr += sizeof(cookie);
3704 binder_inner_proc_lock(proc);
3705 list_for_each_entry(w, &proc->delivered_death,
3706 entry) {
3707 struct binder_ref_death *tmp_death =
3708 container_of(w,
3709 struct binder_ref_death,
3710 work);
3711
3712 if (tmp_death->cookie == cookie) {
3713 death = tmp_death;
3714 break;
3715 }
3716 }
3717 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3718 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3719 proc->pid, thread->pid, (u64)cookie,
3720 death);
3721 if (death == NULL) {
3722 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3723 proc->pid, thread->pid, (u64)cookie);
3724 binder_inner_proc_unlock(proc);
3725 break;
3726 }
3727 binder_dequeue_work_ilocked(&death->work);
3728 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3729 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3730 if (thread->looper &
3731 (BINDER_LOOPER_STATE_REGISTERED |
3732 BINDER_LOOPER_STATE_ENTERED))
3733 binder_enqueue_thread_work_ilocked(
3734 thread, &death->work);
3735 else {
3736 binder_enqueue_work_ilocked(
3737 &death->work,
3738 &proc->todo);
3739 binder_wakeup_proc_ilocked(proc);
3740 }
3741 }
3742 binder_inner_proc_unlock(proc);
3743 } break;
3744
3745 default:
3746 pr_err("%d:%d unknown command %d\n",
3747 proc->pid, thread->pid, cmd);
3748 return -EINVAL;
3749 }
3750 *consumed = ptr - buffer;
3751 }
3752 return 0;
3753}
3754
3755static void binder_stat_br(struct binder_proc *proc,
3756 struct binder_thread *thread, uint32_t cmd)
3757{
3758 trace_binder_return(cmd);
3759 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3760 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3761 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3762 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3763 }
3764}
3765
3766static int binder_put_node_cmd(struct binder_proc *proc,
3767 struct binder_thread *thread,
3768 void __user **ptrp,
3769 binder_uintptr_t node_ptr,
3770 binder_uintptr_t node_cookie,
3771 int node_debug_id,
3772 uint32_t cmd, const char *cmd_name)
3773{
3774 void __user *ptr = *ptrp;
3775
3776 if (put_user(cmd, (uint32_t __user *)ptr))
3777 return -EFAULT;
3778 ptr += sizeof(uint32_t);
3779
3780 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3781 return -EFAULT;
3782 ptr += sizeof(binder_uintptr_t);
3783
3784 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3785 return -EFAULT;
3786 ptr += sizeof(binder_uintptr_t);
3787
3788 binder_stat_br(proc, thread, cmd);
3789 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3790 proc->pid, thread->pid, cmd_name, node_debug_id,
3791 (u64)node_ptr, (u64)node_cookie);
3792
3793 *ptrp = ptr;
3794 return 0;
3795}
3796
3797static int binder_wait_for_work(struct binder_thread *thread,
3798 bool do_proc_work)
3799{
3800 DEFINE_WAIT(wait);
3801 struct binder_proc *proc = thread->proc;
3802 int ret = 0;
3803
3804 freezer_do_not_count();
3805 binder_inner_proc_lock(proc);
3806 for (;;) {
3807 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3808 if (binder_has_work_ilocked(thread, do_proc_work))
3809 break;
3810 if (do_proc_work)
3811 list_add(&thread->waiting_thread_node,
3812 &proc->waiting_threads);
3813 binder_inner_proc_unlock(proc);
3814 schedule();
3815 binder_inner_proc_lock(proc);
3816 list_del_init(&thread->waiting_thread_node);
3817 if (signal_pending(current)) {
3818 ret = -ERESTARTSYS;
3819 break;
3820 }
3821 }
3822 finish_wait(&thread->wait, &wait);
3823 binder_inner_proc_unlock(proc);
3824 freezer_count();
3825
3826 return ret;
3827}
3828
3829static int binder_thread_read(struct binder_proc *proc,
3830 struct binder_thread *thread,
3831 binder_uintptr_t binder_buffer, size_t size,
3832 binder_size_t *consumed, int non_block)
3833{
3834 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3835 void __user *ptr = buffer + *consumed;
3836 void __user *end = buffer + size;
3837
3838 int ret = 0;
3839 int wait_for_proc_work;
3840
3841 if (*consumed == 0) {
3842 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3843 return -EFAULT;
3844 ptr += sizeof(uint32_t);
3845 }
3846
3847retry:
3848 binder_inner_proc_lock(proc);
3849 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3850 binder_inner_proc_unlock(proc);
3851
3852 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3853
3854 trace_binder_wait_for_work(wait_for_proc_work,
3855 !!thread->transaction_stack,
3856 !binder_worklist_empty(proc, &thread->todo));
3857 if (wait_for_proc_work) {
3858 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3859 BINDER_LOOPER_STATE_ENTERED))) {
3860 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3861 proc->pid, thread->pid, thread->looper);
3862 wait_event_interruptible(binder_user_error_wait,
3863 binder_stop_on_user_error < 2);
3864 }
3865 binder_set_nice(proc->default_priority);
3866 }
3867
3868 if (non_block) {
3869 if (!binder_has_work(thread, wait_for_proc_work))
3870 ret = -EAGAIN;
3871 } else {
3872 ret = binder_wait_for_work(thread, wait_for_proc_work);
3873 }
3874
3875 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3876
3877 if (ret)
3878 return ret;
3879
3880 while (1) {
3881 uint32_t cmd;
3882 struct binder_transaction_data tr;
3883 struct binder_work *w = NULL;
3884 struct list_head *list = NULL;
3885 struct binder_transaction *t = NULL;
3886 struct binder_thread *t_from;
3887
3888 binder_inner_proc_lock(proc);
3889 if (!binder_worklist_empty_ilocked(&thread->todo))
3890 list = &thread->todo;
3891 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3892 wait_for_proc_work)
3893 list = &proc->todo;
3894 else {
3895 binder_inner_proc_unlock(proc);
3896
3897
3898 if (ptr - buffer == 4 && !thread->looper_need_return)
3899 goto retry;
3900 break;
3901 }
3902
3903 if (end - ptr < sizeof(tr) + 4) {
3904 binder_inner_proc_unlock(proc);
3905 break;
3906 }
3907 w = binder_dequeue_work_head_ilocked(list);
3908 if (binder_worklist_empty_ilocked(&thread->todo))
3909 thread->process_todo = false;
3910
3911 switch (w->type) {
3912 case BINDER_WORK_TRANSACTION: {
3913 binder_inner_proc_unlock(proc);
3914 t = container_of(w, struct binder_transaction, work);
3915 } break;
3916 case BINDER_WORK_RETURN_ERROR: {
3917 struct binder_error *e = container_of(
3918 w, struct binder_error, work);
3919
3920 WARN_ON(e->cmd == BR_OK);
3921 binder_inner_proc_unlock(proc);
3922 if (put_user(e->cmd, (uint32_t __user *)ptr))
3923 return -EFAULT;
3924 cmd = e->cmd;
3925 e->cmd = BR_OK;
3926 ptr += sizeof(uint32_t);
3927
3928 binder_stat_br(proc, thread, cmd);
3929 } break;
3930 case BINDER_WORK_TRANSACTION_COMPLETE: {
3931 binder_inner_proc_unlock(proc);
3932 cmd = BR_TRANSACTION_COMPLETE;
3933 if (put_user(cmd, (uint32_t __user *)ptr))
3934 return -EFAULT;
3935 ptr += sizeof(uint32_t);
3936
3937 binder_stat_br(proc, thread, cmd);
3938 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3939 "%d:%d BR_TRANSACTION_COMPLETE\n",
3940 proc->pid, thread->pid);
3941 kfree(w);
3942 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3943 } break;
3944 case BINDER_WORK_NODE: {
3945 struct binder_node *node = container_of(w, struct binder_node, work);
3946 int strong, weak;
3947 binder_uintptr_t node_ptr = node->ptr;
3948 binder_uintptr_t node_cookie = node->cookie;
3949 int node_debug_id = node->debug_id;
3950 int has_weak_ref;
3951 int has_strong_ref;
3952 void __user *orig_ptr = ptr;
3953
3954 BUG_ON(proc != node->proc);
3955 strong = node->internal_strong_refs ||
3956 node->local_strong_refs;
3957 weak = !hlist_empty(&node->refs) ||
3958 node->local_weak_refs ||
3959 node->tmp_refs || strong;
3960 has_strong_ref = node->has_strong_ref;
3961 has_weak_ref = node->has_weak_ref;
3962
3963 if (weak && !has_weak_ref) {
3964 node->has_weak_ref = 1;
3965 node->pending_weak_ref = 1;
3966 node->local_weak_refs++;
3967 }
3968 if (strong && !has_strong_ref) {
3969 node->has_strong_ref = 1;
3970 node->pending_strong_ref = 1;
3971 node->local_strong_refs++;
3972 }
3973 if (!strong && has_strong_ref)
3974 node->has_strong_ref = 0;
3975 if (!weak && has_weak_ref)
3976 node->has_weak_ref = 0;
3977 if (!weak && !strong) {
3978 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3979 "%d:%d node %d u%016llx c%016llx deleted\n",
3980 proc->pid, thread->pid,
3981 node_debug_id,
3982 (u64)node_ptr,
3983 (u64)node_cookie);
3984 rb_erase(&node->rb_node, &proc->nodes);
3985 binder_inner_proc_unlock(proc);
3986 binder_node_lock(node);
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996 binder_node_unlock(node);
3997 binder_free_node(node);
3998 } else
3999 binder_inner_proc_unlock(proc);
4000
4001 if (weak && !has_weak_ref)
4002 ret = binder_put_node_cmd(
4003 proc, thread, &ptr, node_ptr,
4004 node_cookie, node_debug_id,
4005 BR_INCREFS, "BR_INCREFS");
4006 if (!ret && strong && !has_strong_ref)
4007 ret = binder_put_node_cmd(
4008 proc, thread, &ptr, node_ptr,
4009 node_cookie, node_debug_id,
4010 BR_ACQUIRE, "BR_ACQUIRE");
4011 if (!ret && !strong && has_strong_ref)
4012 ret = binder_put_node_cmd(
4013 proc, thread, &ptr, node_ptr,
4014 node_cookie, node_debug_id,
4015 BR_RELEASE, "BR_RELEASE");
4016 if (!ret && !weak && has_weak_ref)
4017 ret = binder_put_node_cmd(
4018 proc, thread, &ptr, node_ptr,
4019 node_cookie, node_debug_id,
4020 BR_DECREFS, "BR_DECREFS");
4021 if (orig_ptr == ptr)
4022 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4023 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4024 proc->pid, thread->pid,
4025 node_debug_id,
4026 (u64)node_ptr,
4027 (u64)node_cookie);
4028 if (ret)
4029 return ret;
4030 } break;
4031 case BINDER_WORK_DEAD_BINDER:
4032 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4033 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4034 struct binder_ref_death *death;
4035 uint32_t cmd;
4036 binder_uintptr_t cookie;
4037
4038 death = container_of(w, struct binder_ref_death, work);
4039 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4040 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4041 else
4042 cmd = BR_DEAD_BINDER;
4043 cookie = death->cookie;
4044
4045 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4046 "%d:%d %s %016llx\n",
4047 proc->pid, thread->pid,
4048 cmd == BR_DEAD_BINDER ?
4049 "BR_DEAD_BINDER" :
4050 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4051 (u64)cookie);
4052 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4053 binder_inner_proc_unlock(proc);
4054 kfree(death);
4055 binder_stats_deleted(BINDER_STAT_DEATH);
4056 } else {
4057 binder_enqueue_work_ilocked(
4058 w, &proc->delivered_death);
4059 binder_inner_proc_unlock(proc);
4060 }
4061 if (put_user(cmd, (uint32_t __user *)ptr))
4062 return -EFAULT;
4063 ptr += sizeof(uint32_t);
4064 if (put_user(cookie,
4065 (binder_uintptr_t __user *)ptr))
4066 return -EFAULT;
4067 ptr += sizeof(binder_uintptr_t);
4068 binder_stat_br(proc, thread, cmd);
4069 if (cmd == BR_DEAD_BINDER)
4070 goto done;
4071 } break;
4072 }
4073
4074 if (!t)
4075 continue;
4076
4077 BUG_ON(t->buffer == NULL);
4078 if (t->buffer->target_node) {
4079 struct binder_node *target_node = t->buffer->target_node;
4080
4081 tr.target.ptr = target_node->ptr;
4082 tr.cookie = target_node->cookie;
4083 t->saved_priority = task_nice(current);
4084 if (t->priority < target_node->min_priority &&
4085 !(t->flags & TF_ONE_WAY))
4086 binder_set_nice(t->priority);
4087 else if (!(t->flags & TF_ONE_WAY) ||
4088 t->saved_priority > target_node->min_priority)
4089 binder_set_nice(target_node->min_priority);
4090 cmd = BR_TRANSACTION;
4091 } else {
4092 tr.target.ptr = 0;
4093 tr.cookie = 0;
4094 cmd = BR_REPLY;
4095 }
4096 tr.code = t->code;
4097 tr.flags = t->flags;
4098 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4099
4100 t_from = binder_get_txn_from(t);
4101 if (t_from) {
4102 struct task_struct *sender = t_from->proc->tsk;
4103
4104 tr.sender_pid = task_tgid_nr_ns(sender,
4105 task_active_pid_ns(current));
4106 } else {
4107 tr.sender_pid = 0;
4108 }
4109
4110 tr.data_size = t->buffer->data_size;
4111 tr.offsets_size = t->buffer->offsets_size;
4112 tr.data.ptr.buffer = (binder_uintptr_t)
4113 ((uintptr_t)t->buffer->data +
4114 binder_alloc_get_user_buffer_offset(&proc->alloc));
4115 tr.data.ptr.offsets = tr.data.ptr.buffer +
4116 ALIGN(t->buffer->data_size,
4117 sizeof(void *));
4118
4119 if (put_user(cmd, (uint32_t __user *)ptr)) {
4120 if (t_from)
4121 binder_thread_dec_tmpref(t_from);
4122
4123 binder_cleanup_transaction(t, "put_user failed",
4124 BR_FAILED_REPLY);
4125
4126 return -EFAULT;
4127 }
4128 ptr += sizeof(uint32_t);
4129 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4130 if (t_from)
4131 binder_thread_dec_tmpref(t_from);
4132
4133 binder_cleanup_transaction(t, "copy_to_user failed",
4134 BR_FAILED_REPLY);
4135
4136 return -EFAULT;
4137 }
4138 ptr += sizeof(tr);
4139
4140 trace_binder_transaction_received(t);
4141 binder_stat_br(proc, thread, cmd);
4142 binder_debug(BINDER_DEBUG_TRANSACTION,
4143 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4144 proc->pid, thread->pid,
4145 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4146 "BR_REPLY",
4147 t->debug_id, t_from ? t_from->proc->pid : 0,
4148 t_from ? t_from->pid : 0, cmd,
4149 t->buffer->data_size, t->buffer->offsets_size,
4150 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4151
4152 if (t_from)
4153 binder_thread_dec_tmpref(t_from);
4154 t->buffer->allow_user_free = 1;
4155 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4156 binder_inner_proc_lock(thread->proc);
4157 t->to_parent = thread->transaction_stack;
4158 t->to_thread = thread;
4159 thread->transaction_stack = t;
4160 binder_inner_proc_unlock(thread->proc);
4161 } else {
4162 binder_free_transaction(t);
4163 }
4164 break;
4165 }
4166
4167done:
4168
4169 *consumed = ptr - buffer;
4170 binder_inner_proc_lock(proc);
4171 if (proc->requested_threads == 0 &&
4172 list_empty(&thread->proc->waiting_threads) &&
4173 proc->requested_threads_started < proc->max_threads &&
4174 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4175 BINDER_LOOPER_STATE_ENTERED))
4176 ) {
4177 proc->requested_threads++;
4178 binder_inner_proc_unlock(proc);
4179 binder_debug(BINDER_DEBUG_THREADS,
4180 "%d:%d BR_SPAWN_LOOPER\n",
4181 proc->pid, thread->pid);
4182 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4183 return -EFAULT;
4184 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4185 } else
4186 binder_inner_proc_unlock(proc);
4187 return 0;
4188}
4189
4190static void binder_release_work(struct binder_proc *proc,
4191 struct list_head *list)
4192{
4193 struct binder_work *w;
4194
4195 while (1) {
4196 w = binder_dequeue_work_head(proc, list);
4197 if (!w)
4198 return;
4199
4200 switch (w->type) {
4201 case BINDER_WORK_TRANSACTION: {
4202 struct binder_transaction *t;
4203
4204 t = container_of(w, struct binder_transaction, work);
4205
4206 binder_cleanup_transaction(t, "process died.",
4207 BR_DEAD_REPLY);
4208 } break;
4209 case BINDER_WORK_RETURN_ERROR: {
4210 struct binder_error *e = container_of(
4211 w, struct binder_error, work);
4212
4213 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4214 "undelivered TRANSACTION_ERROR: %u\n",
4215 e->cmd);
4216 } break;
4217 case BINDER_WORK_TRANSACTION_COMPLETE: {
4218 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4219 "undelivered TRANSACTION_COMPLETE\n");
4220 kfree(w);
4221 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4222 } break;
4223 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4224 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4225 struct binder_ref_death *death;
4226
4227 death = container_of(w, struct binder_ref_death, work);
4228 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4229 "undelivered death notification, %016llx\n",
4230 (u64)death->cookie);
4231 kfree(death);
4232 binder_stats_deleted(BINDER_STAT_DEATH);
4233 } break;
4234 default:
4235 pr_err("unexpected work type, %d, not freed\n",
4236 w->type);
4237 break;
4238 }
4239 }
4240
4241}
4242
4243static struct binder_thread *binder_get_thread_ilocked(
4244 struct binder_proc *proc, struct binder_thread *new_thread)
4245{
4246 struct binder_thread *thread = NULL;
4247 struct rb_node *parent = NULL;
4248 struct rb_node **p = &proc->threads.rb_node;
4249
4250 while (*p) {
4251 parent = *p;
4252 thread = rb_entry(parent, struct binder_thread, rb_node);
4253
4254 if (current->pid < thread->pid)
4255 p = &(*p)->rb_left;
4256 else if (current->pid > thread->pid)
4257 p = &(*p)->rb_right;
4258 else
4259 return thread;
4260 }
4261 if (!new_thread)
4262 return NULL;
4263 thread = new_thread;
4264 binder_stats_created(BINDER_STAT_THREAD);
4265 thread->proc = proc;
4266 thread->pid = current->pid;
4267 atomic_set(&thread->tmp_ref, 0);
4268 init_waitqueue_head(&thread->wait);
4269 INIT_LIST_HEAD(&thread->todo);
4270 rb_link_node(&thread->rb_node, parent, p);
4271 rb_insert_color(&thread->rb_node, &proc->threads);
4272 thread->looper_need_return = true;
4273 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4274 thread->return_error.cmd = BR_OK;
4275 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4276 thread->reply_error.cmd = BR_OK;
4277 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4278 return thread;
4279}
4280
4281static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4282{
4283 struct binder_thread *thread;
4284 struct binder_thread *new_thread;
4285
4286 binder_inner_proc_lock(proc);
4287 thread = binder_get_thread_ilocked(proc, NULL);
4288 binder_inner_proc_unlock(proc);
4289 if (!thread) {
4290 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4291 if (new_thread == NULL)
4292 return NULL;
4293 binder_inner_proc_lock(proc);
4294 thread = binder_get_thread_ilocked(proc, new_thread);
4295 binder_inner_proc_unlock(proc);
4296 if (thread != new_thread)
4297 kfree(new_thread);
4298 }
4299 return thread;
4300}
4301
4302static void binder_free_proc(struct binder_proc *proc)
4303{
4304 BUG_ON(!list_empty(&proc->todo));
4305 BUG_ON(!list_empty(&proc->delivered_death));
4306 binder_alloc_deferred_release(&proc->alloc);
4307 put_task_struct(proc->tsk);
4308 binder_stats_deleted(BINDER_STAT_PROC);
4309 kfree(proc);
4310}
4311
4312static void binder_free_thread(struct binder_thread *thread)
4313{
4314 BUG_ON(!list_empty(&thread->todo));
4315 binder_stats_deleted(BINDER_STAT_THREAD);
4316 binder_proc_dec_tmpref(thread->proc);
4317 kfree(thread);
4318}
4319
4320static int binder_thread_release(struct binder_proc *proc,
4321 struct binder_thread *thread)
4322{
4323 struct binder_transaction *t;
4324 struct binder_transaction *send_reply = NULL;
4325 int active_transactions = 0;
4326 struct binder_transaction *last_t = NULL;
4327
4328 binder_inner_proc_lock(thread->proc);
4329
4330
4331
4332
4333
4334
4335 proc->tmp_ref++;
4336
4337
4338
4339
4340 atomic_inc(&thread->tmp_ref);
4341 rb_erase(&thread->rb_node, &proc->threads);
4342 t = thread->transaction_stack;
4343 if (t) {
4344 spin_lock(&t->lock);
4345 if (t->to_thread == thread)
4346 send_reply = t;
4347 }
4348 thread->is_dead = true;
4349
4350 while (t) {
4351 last_t = t;
4352 active_transactions++;
4353 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4354 "release %d:%d transaction %d %s, still active\n",
4355 proc->pid, thread->pid,
4356 t->debug_id,
4357 (t->to_thread == thread) ? "in" : "out");
4358
4359 if (t->to_thread == thread) {
4360 t->to_proc = NULL;
4361 t->to_thread = NULL;
4362 if (t->buffer) {
4363 t->buffer->transaction = NULL;
4364 t->buffer = NULL;
4365 }
4366 t = t->to_parent;
4367 } else if (t->from == thread) {
4368 t->from = NULL;
4369 t = t->from_parent;
4370 } else
4371 BUG();
4372 spin_unlock(&last_t->lock);
4373 if (t)
4374 spin_lock(&t->lock);
4375 }
4376
4377
4378
4379
4380
4381
4382
4383 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4384 waitqueue_active(&thread->wait)) {
4385 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4386 }
4387
4388 binder_inner_proc_unlock(thread->proc);
4389
4390
4391
4392
4393
4394
4395
4396 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4397 synchronize_rcu();
4398
4399 if (send_reply)
4400 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4401 binder_release_work(proc, &thread->todo);
4402 binder_thread_dec_tmpref(thread);
4403 return active_transactions;
4404}
4405
4406static __poll_t binder_poll(struct file *filp,
4407 struct poll_table_struct *wait)
4408{
4409 struct binder_proc *proc = filp->private_data;
4410 struct binder_thread *thread = NULL;
4411 bool wait_for_proc_work;
4412
4413 thread = binder_get_thread(proc);
4414 if (!thread)
4415 return POLLERR;
4416
4417 binder_inner_proc_lock(thread->proc);
4418 thread->looper |= BINDER_LOOPER_STATE_POLL;
4419 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4420
4421 binder_inner_proc_unlock(thread->proc);
4422
4423 poll_wait(filp, &thread->wait, wait);
4424
4425 if (binder_has_work(thread, wait_for_proc_work))
4426 return EPOLLIN;
4427
4428 return 0;
4429}
4430
4431static int binder_ioctl_write_read(struct file *filp,
4432 unsigned int cmd, unsigned long arg,
4433 struct binder_thread *thread)
4434{
4435 int ret = 0;
4436 struct binder_proc *proc = filp->private_data;
4437 unsigned int size = _IOC_SIZE(cmd);
4438 void __user *ubuf = (void __user *)arg;
4439 struct binder_write_read bwr;
4440
4441 if (size != sizeof(struct binder_write_read)) {
4442 ret = -EINVAL;
4443 goto out;
4444 }
4445 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4446 ret = -EFAULT;
4447 goto out;
4448 }
4449 binder_debug(BINDER_DEBUG_READ_WRITE,
4450 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4451 proc->pid, thread->pid,
4452 (u64)bwr.write_size, (u64)bwr.write_buffer,
4453 (u64)bwr.read_size, (u64)bwr.read_buffer);
4454
4455 if (bwr.write_size > 0) {
4456 ret = binder_thread_write(proc, thread,
4457 bwr.write_buffer,
4458 bwr.write_size,
4459 &bwr.write_consumed);
4460 trace_binder_write_done(ret);
4461 if (ret < 0) {
4462 bwr.read_consumed = 0;
4463 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4464 ret = -EFAULT;
4465 goto out;
4466 }
4467 }
4468 if (bwr.read_size > 0) {
4469 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4470 bwr.read_size,
4471 &bwr.read_consumed,
4472 filp->f_flags & O_NONBLOCK);
4473 trace_binder_read_done(ret);
4474 binder_inner_proc_lock(proc);
4475 if (!binder_worklist_empty_ilocked(&proc->todo))
4476 binder_wakeup_proc_ilocked(proc);
4477 binder_inner_proc_unlock(proc);
4478 if (ret < 0) {
4479 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4480 ret = -EFAULT;
4481 goto out;
4482 }
4483 }
4484 binder_debug(BINDER_DEBUG_READ_WRITE,
4485 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4486 proc->pid, thread->pid,
4487 (u64)bwr.write_consumed, (u64)bwr.write_size,
4488 (u64)bwr.read_consumed, (u64)bwr.read_size);
4489 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4490 ret = -EFAULT;
4491 goto out;
4492 }
4493out:
4494 return ret;
4495}
4496
4497static int binder_ioctl_set_ctx_mgr(struct file *filp)
4498{
4499 int ret = 0;
4500 struct binder_proc *proc = filp->private_data;
4501 struct binder_context *context = proc->context;
4502 struct binder_node *new_node;
4503 kuid_t curr_euid = current_euid();
4504
4505 mutex_lock(&context->context_mgr_node_lock);
4506 if (context->binder_context_mgr_node) {
4507 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4508 ret = -EBUSY;
4509 goto out;
4510 }
4511 ret = security_binder_set_context_mgr(proc->tsk);
4512 if (ret < 0)
4513 goto out;
4514 if (uid_valid(context->binder_context_mgr_uid)) {
4515 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4516 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4517 from_kuid(&init_user_ns, curr_euid),
4518 from_kuid(&init_user_ns,
4519 context->binder_context_mgr_uid));
4520 ret = -EPERM;
4521 goto out;
4522 }
4523 } else {
4524 context->binder_context_mgr_uid = curr_euid;
4525 }
4526 new_node = binder_new_node(proc, NULL);
4527 if (!new_node) {
4528 ret = -ENOMEM;
4529 goto out;
4530 }
4531 binder_node_lock(new_node);
4532 new_node->local_weak_refs++;
4533 new_node->local_strong_refs++;
4534 new_node->has_strong_ref = 1;
4535 new_node->has_weak_ref = 1;
4536 context->binder_context_mgr_node = new_node;
4537 binder_node_unlock(new_node);
4538 binder_put_node(new_node);
4539out:
4540 mutex_unlock(&context->context_mgr_node_lock);
4541 return ret;
4542}
4543
4544static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4545 struct binder_node_debug_info *info)
4546{
4547 struct rb_node *n;
4548 binder_uintptr_t ptr = info->ptr;
4549
4550 memset(info, 0, sizeof(*info));
4551
4552 binder_inner_proc_lock(proc);
4553 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4554 struct binder_node *node = rb_entry(n, struct binder_node,
4555 rb_node);
4556 if (node->ptr > ptr) {
4557 info->ptr = node->ptr;
4558 info->cookie = node->cookie;
4559 info->has_strong_ref = node->has_strong_ref;
4560 info->has_weak_ref = node->has_weak_ref;
4561 break;
4562 }
4563 }
4564 binder_inner_proc_unlock(proc);
4565
4566 return 0;
4567}
4568
4569static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4570{
4571 int ret;
4572 struct binder_proc *proc = filp->private_data;
4573 struct binder_thread *thread;
4574 unsigned int size = _IOC_SIZE(cmd);
4575 void __user *ubuf = (void __user *)arg;
4576
4577
4578
4579
4580 binder_selftest_alloc(&proc->alloc);
4581
4582 trace_binder_ioctl(cmd, arg);
4583
4584 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4585 if (ret)
4586 goto err_unlocked;
4587
4588 thread = binder_get_thread(proc);
4589 if (thread == NULL) {
4590 ret = -ENOMEM;
4591 goto err;
4592 }
4593
4594 switch (cmd) {
4595 case BINDER_WRITE_READ:
4596 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4597 if (ret)
4598 goto err;
4599 break;
4600 case BINDER_SET_MAX_THREADS: {
4601 int max_threads;
4602
4603 if (copy_from_user(&max_threads, ubuf,
4604 sizeof(max_threads))) {
4605 ret = -EINVAL;
4606 goto err;
4607 }
4608 binder_inner_proc_lock(proc);
4609 proc->max_threads = max_threads;
4610 binder_inner_proc_unlock(proc);
4611 break;
4612 }
4613 case BINDER_SET_CONTEXT_MGR:
4614 ret = binder_ioctl_set_ctx_mgr(filp);
4615 if (ret)
4616 goto err;
4617 break;
4618 case BINDER_THREAD_EXIT:
4619 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4620 proc->pid, thread->pid);
4621 binder_thread_release(proc, thread);
4622 thread = NULL;
4623 break;
4624 case BINDER_VERSION: {
4625 struct binder_version __user *ver = ubuf;
4626
4627 if (size != sizeof(struct binder_version)) {
4628 ret = -EINVAL;
4629 goto err;
4630 }
4631 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4632 &ver->protocol_version)) {
4633 ret = -EINVAL;
4634 goto err;
4635 }
4636 break;
4637 }
4638 case BINDER_GET_NODE_DEBUG_INFO: {
4639 struct binder_node_debug_info info;
4640
4641 if (copy_from_user(&info, ubuf, sizeof(info))) {
4642 ret = -EFAULT;
4643 goto err;
4644 }
4645
4646 ret = binder_ioctl_get_node_debug_info(proc, &info);
4647 if (ret < 0)
4648 goto err;
4649
4650 if (copy_to_user(ubuf, &info, sizeof(info))) {
4651 ret = -EFAULT;
4652 goto err;
4653 }
4654 break;
4655 }
4656 default:
4657 ret = -EINVAL;
4658 goto err;
4659 }
4660 ret = 0;
4661err:
4662 if (thread)
4663 thread->looper_need_return = false;
4664 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4665 if (ret && ret != -ERESTARTSYS)
4666 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4667err_unlocked:
4668 trace_binder_ioctl_done(ret);
4669 return ret;
4670}
4671
4672static void binder_vma_open(struct vm_area_struct *vma)
4673{
4674 struct binder_proc *proc = vma->vm_private_data;
4675
4676 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4677 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4678 proc->pid, vma->vm_start, vma->vm_end,
4679 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4680 (unsigned long)pgprot_val(vma->vm_page_prot));
4681}
4682
4683static void binder_vma_close(struct vm_area_struct *vma)
4684{
4685 struct binder_proc *proc = vma->vm_private_data;
4686
4687 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4688 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4689 proc->pid, vma->vm_start, vma->vm_end,
4690 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4691 (unsigned long)pgprot_val(vma->vm_page_prot));
4692 binder_alloc_vma_close(&proc->alloc);
4693 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4694}
4695
4696static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4697{
4698 return VM_FAULT_SIGBUS;
4699}
4700
4701static const struct vm_operations_struct binder_vm_ops = {
4702 .open = binder_vma_open,
4703 .close = binder_vma_close,
4704 .fault = binder_vm_fault,
4705};
4706
4707static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4708{
4709 int ret;
4710 struct binder_proc *proc = filp->private_data;
4711 const char *failure_string;
4712
4713 if (proc->tsk != current->group_leader)
4714 return -EINVAL;
4715
4716 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4717 vma->vm_end = vma->vm_start + SZ_4M;
4718
4719 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4720 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4721 __func__, proc->pid, vma->vm_start, vma->vm_end,
4722 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4723 (unsigned long)pgprot_val(vma->vm_page_prot));
4724
4725 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4726 ret = -EPERM;
4727 failure_string = "bad vm_flags";
4728 goto err_bad_arg;
4729 }
4730 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4731 vma->vm_flags &= ~VM_MAYWRITE;
4732
4733 vma->vm_ops = &binder_vm_ops;
4734 vma->vm_private_data = proc;
4735
4736 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4737 if (ret)
4738 return ret;
4739 mutex_lock(&proc->files_lock);
4740 proc->files = get_files_struct(current);
4741 mutex_unlock(&proc->files_lock);
4742 return 0;
4743
4744err_bad_arg:
4745 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4746 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4747 return ret;
4748}
4749
4750static int binder_open(struct inode *nodp, struct file *filp)
4751{
4752 struct binder_proc *proc;
4753 struct binder_device *binder_dev;
4754
4755 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
4756 current->group_leader->pid, current->pid);
4757
4758 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4759 if (proc == NULL)
4760 return -ENOMEM;
4761 spin_lock_init(&proc->inner_lock);
4762 spin_lock_init(&proc->outer_lock);
4763 get_task_struct(current->group_leader);
4764 proc->tsk = current->group_leader;
4765 mutex_init(&proc->files_lock);
4766 INIT_LIST_HEAD(&proc->todo);
4767 proc->default_priority = task_nice(current);
4768 binder_dev = container_of(filp->private_data, struct binder_device,
4769 miscdev);
4770 proc->context = &binder_dev->context;
4771 binder_alloc_init(&proc->alloc);
4772
4773 binder_stats_created(BINDER_STAT_PROC);
4774 proc->pid = current->group_leader->pid;
4775 INIT_LIST_HEAD(&proc->delivered_death);
4776 INIT_LIST_HEAD(&proc->waiting_threads);
4777 filp->private_data = proc;
4778
4779 mutex_lock(&binder_procs_lock);
4780 hlist_add_head(&proc->proc_node, &binder_procs);
4781 mutex_unlock(&binder_procs_lock);
4782
4783 if (binder_debugfs_dir_entry_proc) {
4784 char strbuf[11];
4785
4786 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4787
4788
4789
4790
4791
4792
4793
4794 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
4795 binder_debugfs_dir_entry_proc,
4796 (void *)(unsigned long)proc->pid,
4797 &binder_proc_fops);
4798 }
4799
4800 return 0;
4801}
4802
4803static int binder_flush(struct file *filp, fl_owner_t id)
4804{
4805 struct binder_proc *proc = filp->private_data;
4806
4807 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4808
4809 return 0;
4810}
4811
4812static void binder_deferred_flush(struct binder_proc *proc)
4813{
4814 struct rb_node *n;
4815 int wake_count = 0;
4816
4817 binder_inner_proc_lock(proc);
4818 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4819 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4820
4821 thread->looper_need_return = true;
4822 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4823 wake_up_interruptible(&thread->wait);
4824 wake_count++;
4825 }
4826 }
4827 binder_inner_proc_unlock(proc);
4828
4829 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4830 "binder_flush: %d woke %d threads\n", proc->pid,
4831 wake_count);
4832}
4833
4834static int binder_release(struct inode *nodp, struct file *filp)
4835{
4836 struct binder_proc *proc = filp->private_data;
4837
4838 debugfs_remove(proc->debugfs_entry);
4839 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4840
4841 return 0;
4842}
4843
4844static int binder_node_release(struct binder_node *node, int refs)
4845{
4846 struct binder_ref *ref;
4847 int death = 0;
4848 struct binder_proc *proc = node->proc;
4849
4850 binder_release_work(proc, &node->async_todo);
4851
4852 binder_node_lock(node);
4853 binder_inner_proc_lock(proc);
4854 binder_dequeue_work_ilocked(&node->work);
4855
4856
4857
4858 BUG_ON(!node->tmp_refs);
4859 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4860 binder_inner_proc_unlock(proc);
4861 binder_node_unlock(node);
4862 binder_free_node(node);
4863
4864 return refs;
4865 }
4866
4867 node->proc = NULL;
4868 node->local_strong_refs = 0;
4869 node->local_weak_refs = 0;
4870 binder_inner_proc_unlock(proc);
4871
4872 spin_lock(&binder_dead_nodes_lock);
4873 hlist_add_head(&node->dead_node, &binder_dead_nodes);
4874 spin_unlock(&binder_dead_nodes_lock);
4875
4876 hlist_for_each_entry(ref, &node->refs, node_entry) {
4877 refs++;
4878
4879
4880
4881
4882
4883
4884 binder_inner_proc_lock(ref->proc);
4885 if (!ref->death) {
4886 binder_inner_proc_unlock(ref->proc);
4887 continue;
4888 }
4889
4890 death++;
4891
4892 BUG_ON(!list_empty(&ref->death->work.entry));
4893 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4894 binder_enqueue_work_ilocked(&ref->death->work,
4895 &ref->proc->todo);
4896 binder_wakeup_proc_ilocked(ref->proc);
4897 binder_inner_proc_unlock(ref->proc);
4898 }
4899
4900 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4901 "node %d now dead, refs %d, death %d\n",
4902 node->debug_id, refs, death);
4903 binder_node_unlock(node);
4904 binder_put_node(node);
4905
4906 return refs;
4907}
4908
4909static void binder_deferred_release(struct binder_proc *proc)
4910{
4911 struct binder_context *context = proc->context;
4912 struct rb_node *n;
4913 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4914
4915 BUG_ON(proc->files);
4916
4917 mutex_lock(&binder_procs_lock);
4918 hlist_del(&proc->proc_node);
4919 mutex_unlock(&binder_procs_lock);
4920
4921 mutex_lock(&context->context_mgr_node_lock);
4922 if (context->binder_context_mgr_node &&
4923 context->binder_context_mgr_node->proc == proc) {
4924 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4925 "%s: %d context_mgr_node gone\n",
4926 __func__, proc->pid);
4927 context->binder_context_mgr_node = NULL;
4928 }
4929 mutex_unlock(&context->context_mgr_node_lock);
4930 binder_inner_proc_lock(proc);
4931
4932
4933
4934
4935 proc->tmp_ref++;
4936
4937 proc->is_dead = true;
4938 threads = 0;
4939 active_transactions = 0;
4940 while ((n = rb_first(&proc->threads))) {
4941 struct binder_thread *thread;
4942
4943 thread = rb_entry(n, struct binder_thread, rb_node);
4944 binder_inner_proc_unlock(proc);
4945 threads++;
4946 active_transactions += binder_thread_release(proc, thread);
4947 binder_inner_proc_lock(proc);
4948 }
4949
4950 nodes = 0;
4951 incoming_refs = 0;
4952 while ((n = rb_first(&proc->nodes))) {
4953 struct binder_node *node;
4954
4955 node = rb_entry(n, struct binder_node, rb_node);
4956 nodes++;
4957
4958
4959
4960
4961
4962 binder_inc_node_tmpref_ilocked(node);
4963 rb_erase(&node->rb_node, &proc->nodes);
4964 binder_inner_proc_unlock(proc);
4965 incoming_refs = binder_node_release(node, incoming_refs);
4966 binder_inner_proc_lock(proc);
4967 }
4968 binder_inner_proc_unlock(proc);
4969
4970 outgoing_refs = 0;
4971 binder_proc_lock(proc);
4972 while ((n = rb_first(&proc->refs_by_desc))) {
4973 struct binder_ref *ref;
4974
4975 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4976 outgoing_refs++;
4977 binder_cleanup_ref_olocked(ref);
4978 binder_proc_unlock(proc);
4979 binder_free_ref(ref);
4980 binder_proc_lock(proc);
4981 }
4982 binder_proc_unlock(proc);
4983
4984 binder_release_work(proc, &proc->todo);
4985 binder_release_work(proc, &proc->delivered_death);
4986
4987 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4988 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4989 __func__, proc->pid, threads, nodes, incoming_refs,
4990 outgoing_refs, active_transactions);
4991
4992 binder_proc_dec_tmpref(proc);
4993}
4994
4995static void binder_deferred_func(struct work_struct *work)
4996{
4997 struct binder_proc *proc;
4998 struct files_struct *files;
4999
5000 int defer;
5001
5002 do {
5003 mutex_lock(&binder_deferred_lock);
5004 if (!hlist_empty(&binder_deferred_list)) {
5005 proc = hlist_entry(binder_deferred_list.first,
5006 struct binder_proc, deferred_work_node);
5007 hlist_del_init(&proc->deferred_work_node);
5008 defer = proc->deferred_work;
5009 proc->deferred_work = 0;
5010 } else {
5011 proc = NULL;
5012 defer = 0;
5013 }
5014 mutex_unlock(&binder_deferred_lock);
5015
5016 files = NULL;
5017 if (defer & BINDER_DEFERRED_PUT_FILES) {
5018 mutex_lock(&proc->files_lock);
5019 files = proc->files;
5020 if (files)
5021 proc->files = NULL;
5022 mutex_unlock(&proc->files_lock);
5023 }
5024
5025 if (defer & BINDER_DEFERRED_FLUSH)
5026 binder_deferred_flush(proc);
5027
5028 if (defer & BINDER_DEFERRED_RELEASE)
5029 binder_deferred_release(proc);
5030
5031 if (files)
5032 put_files_struct(files);
5033 } while (proc);
5034}
5035static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5036
5037static void
5038binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5039{
5040 mutex_lock(&binder_deferred_lock);
5041 proc->deferred_work |= defer;
5042 if (hlist_unhashed(&proc->deferred_work_node)) {
5043 hlist_add_head(&proc->deferred_work_node,
5044 &binder_deferred_list);
5045 schedule_work(&binder_deferred_work);
5046 }
5047 mutex_unlock(&binder_deferred_lock);
5048}
5049
5050static void print_binder_transaction_ilocked(struct seq_file *m,
5051 struct binder_proc *proc,
5052 const char *prefix,
5053 struct binder_transaction *t)
5054{
5055 struct binder_proc *to_proc;
5056 struct binder_buffer *buffer = t->buffer;
5057
5058 spin_lock(&t->lock);
5059 to_proc = t->to_proc;
5060 seq_printf(m,
5061 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5062 prefix, t->debug_id, t,
5063 t->from ? t->from->proc->pid : 0,
5064 t->from ? t->from->pid : 0,
5065 to_proc ? to_proc->pid : 0,
5066 t->to_thread ? t->to_thread->pid : 0,
5067 t->code, t->flags, t->priority, t->need_reply);
5068 spin_unlock(&t->lock);
5069
5070 if (proc != to_proc) {
5071
5072
5073
5074
5075 seq_puts(m, "\n");
5076 return;
5077 }
5078
5079 if (buffer == NULL) {
5080 seq_puts(m, " buffer free\n");
5081 return;
5082 }
5083 if (buffer->target_node)
5084 seq_printf(m, " node %d", buffer->target_node->debug_id);
5085 seq_printf(m, " size %zd:%zd data %pK\n",
5086 buffer->data_size, buffer->offsets_size,
5087 buffer->data);
5088}
5089
5090static void print_binder_work_ilocked(struct seq_file *m,
5091 struct binder_proc *proc,
5092 const char *prefix,
5093 const char *transaction_prefix,
5094 struct binder_work *w)
5095{
5096 struct binder_node *node;
5097 struct binder_transaction *t;
5098
5099 switch (w->type) {
5100 case BINDER_WORK_TRANSACTION:
5101 t = container_of(w, struct binder_transaction, work);
5102 print_binder_transaction_ilocked(
5103 m, proc, transaction_prefix, t);
5104 break;
5105 case BINDER_WORK_RETURN_ERROR: {
5106 struct binder_error *e = container_of(
5107 w, struct binder_error, work);
5108
5109 seq_printf(m, "%stransaction error: %u\n",
5110 prefix, e->cmd);
5111 } break;
5112 case BINDER_WORK_TRANSACTION_COMPLETE:
5113 seq_printf(m, "%stransaction complete\n", prefix);
5114 break;
5115 case BINDER_WORK_NODE:
5116 node = container_of(w, struct binder_node, work);
5117 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5118 prefix, node->debug_id,
5119 (u64)node->ptr, (u64)node->cookie);
5120 break;
5121 case BINDER_WORK_DEAD_BINDER:
5122 seq_printf(m, "%shas dead binder\n", prefix);
5123 break;
5124 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5125 seq_printf(m, "%shas cleared dead binder\n", prefix);
5126 break;
5127 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5128 seq_printf(m, "%shas cleared death notification\n", prefix);
5129 break;
5130 default:
5131 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5132 break;
5133 }
5134}
5135
5136static void print_binder_thread_ilocked(struct seq_file *m,
5137 struct binder_thread *thread,
5138 int print_always)
5139{
5140 struct binder_transaction *t;
5141 struct binder_work *w;
5142 size_t start_pos = m->count;
5143 size_t header_pos;
5144
5145 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5146 thread->pid, thread->looper,
5147 thread->looper_need_return,
5148 atomic_read(&thread->tmp_ref));
5149 header_pos = m->count;
5150 t = thread->transaction_stack;
5151 while (t) {
5152 if (t->from == thread) {
5153 print_binder_transaction_ilocked(m, thread->proc,
5154 " outgoing transaction", t);
5155 t = t->from_parent;
5156 } else if (t->to_thread == thread) {
5157 print_binder_transaction_ilocked(m, thread->proc,
5158 " incoming transaction", t);
5159 t = t->to_parent;
5160 } else {
5161 print_binder_transaction_ilocked(m, thread->proc,
5162 " bad transaction", t);
5163 t = NULL;
5164 }
5165 }
5166 list_for_each_entry(w, &thread->todo, entry) {
5167 print_binder_work_ilocked(m, thread->proc, " ",
5168 " pending transaction", w);
5169 }
5170 if (!print_always && m->count == header_pos)
5171 m->count = start_pos;
5172}
5173
5174static void print_binder_node_nilocked(struct seq_file *m,
5175 struct binder_node *node)
5176{
5177 struct binder_ref *ref;
5178 struct binder_work *w;
5179 int count;
5180
5181 count = 0;
5182 hlist_for_each_entry(ref, &node->refs, node_entry)
5183 count++;
5184
5185 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5186 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5187 node->has_strong_ref, node->has_weak_ref,
5188 node->local_strong_refs, node->local_weak_refs,
5189 node->internal_strong_refs, count, node->tmp_refs);
5190 if (count) {
5191 seq_puts(m, " proc");
5192 hlist_for_each_entry(ref, &node->refs, node_entry)
5193 seq_printf(m, " %d", ref->proc->pid);
5194 }
5195 seq_puts(m, "\n");
5196 if (node->proc) {
5197 list_for_each_entry(w, &node->async_todo, entry)
5198 print_binder_work_ilocked(m, node->proc, " ",
5199 " pending async transaction", w);
5200 }
5201}
5202
5203static void print_binder_ref_olocked(struct seq_file *m,
5204 struct binder_ref *ref)
5205{
5206 binder_node_lock(ref->node);
5207 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5208 ref->data.debug_id, ref->data.desc,
5209 ref->node->proc ? "" : "dead ",
5210 ref->node->debug_id, ref->data.strong,
5211 ref->data.weak, ref->death);
5212 binder_node_unlock(ref->node);
5213}
5214
5215static void print_binder_proc(struct seq_file *m,
5216 struct binder_proc *proc, int print_all)
5217{
5218 struct binder_work *w;
5219 struct rb_node *n;
5220 size_t start_pos = m->count;
5221 size_t header_pos;
5222 struct binder_node *last_node = NULL;
5223
5224 seq_printf(m, "proc %d\n", proc->pid);
5225 seq_printf(m, "context %s\n", proc->context->name);
5226 header_pos = m->count;
5227
5228 binder_inner_proc_lock(proc);
5229 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5230 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5231 rb_node), print_all);
5232
5233 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5234 struct binder_node *node = rb_entry(n, struct binder_node,
5235 rb_node);
5236
5237
5238
5239
5240
5241 binder_inc_node_tmpref_ilocked(node);
5242
5243 binder_inner_proc_unlock(proc);
5244 if (last_node)
5245 binder_put_node(last_node);
5246 binder_node_inner_lock(node);
5247 print_binder_node_nilocked(m, node);
5248 binder_node_inner_unlock(node);
5249 last_node = node;
5250 binder_inner_proc_lock(proc);
5251 }
5252 binder_inner_proc_unlock(proc);
5253 if (last_node)
5254 binder_put_node(last_node);
5255
5256 if (print_all) {
5257 binder_proc_lock(proc);
5258 for (n = rb_first(&proc->refs_by_desc);
5259 n != NULL;
5260 n = rb_next(n))
5261 print_binder_ref_olocked(m, rb_entry(n,
5262 struct binder_ref,
5263 rb_node_desc));
5264 binder_proc_unlock(proc);
5265 }
5266 binder_alloc_print_allocated(m, &proc->alloc);
5267 binder_inner_proc_lock(proc);
5268 list_for_each_entry(w, &proc->todo, entry)
5269 print_binder_work_ilocked(m, proc, " ",
5270 " pending transaction", w);
5271 list_for_each_entry(w, &proc->delivered_death, entry) {
5272 seq_puts(m, " has delivered dead binder\n");
5273 break;
5274 }
5275 binder_inner_proc_unlock(proc);
5276 if (!print_all && m->count == header_pos)
5277 m->count = start_pos;
5278}
5279
5280static const char * const binder_return_strings[] = {
5281 "BR_ERROR",
5282 "BR_OK",
5283 "BR_TRANSACTION",
5284 "BR_REPLY",
5285 "BR_ACQUIRE_RESULT",
5286 "BR_DEAD_REPLY",
5287 "BR_TRANSACTION_COMPLETE",
5288 "BR_INCREFS",
5289 "BR_ACQUIRE",
5290 "BR_RELEASE",
5291 "BR_DECREFS",
5292 "BR_ATTEMPT_ACQUIRE",
5293 "BR_NOOP",
5294 "BR_SPAWN_LOOPER",
5295 "BR_FINISHED",
5296 "BR_DEAD_BINDER",
5297 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5298 "BR_FAILED_REPLY"
5299};
5300
5301static const char * const binder_command_strings[] = {
5302 "BC_TRANSACTION",
5303 "BC_REPLY",
5304 "BC_ACQUIRE_RESULT",
5305 "BC_FREE_BUFFER",
5306 "BC_INCREFS",
5307 "BC_ACQUIRE",
5308 "BC_RELEASE",
5309 "BC_DECREFS",
5310 "BC_INCREFS_DONE",
5311 "BC_ACQUIRE_DONE",
5312 "BC_ATTEMPT_ACQUIRE",
5313 "BC_REGISTER_LOOPER",
5314 "BC_ENTER_LOOPER",
5315 "BC_EXIT_LOOPER",
5316 "BC_REQUEST_DEATH_NOTIFICATION",
5317 "BC_CLEAR_DEATH_NOTIFICATION",
5318 "BC_DEAD_BINDER_DONE",
5319 "BC_TRANSACTION_SG",
5320 "BC_REPLY_SG",
5321};
5322
5323static const char * const binder_objstat_strings[] = {
5324 "proc",
5325 "thread",
5326 "node",
5327 "ref",
5328 "death",
5329 "transaction",
5330 "transaction_complete"
5331};
5332
5333static void print_binder_stats(struct seq_file *m, const char *prefix,
5334 struct binder_stats *stats)
5335{
5336 int i;
5337
5338 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5339 ARRAY_SIZE(binder_command_strings));
5340 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5341 int temp = atomic_read(&stats->bc[i]);
5342
5343 if (temp)
5344 seq_printf(m, "%s%s: %d\n", prefix,
5345 binder_command_strings[i], temp);
5346 }
5347
5348 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5349 ARRAY_SIZE(binder_return_strings));
5350 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5351 int temp = atomic_read(&stats->br[i]);
5352
5353 if (temp)
5354 seq_printf(m, "%s%s: %d\n", prefix,
5355 binder_return_strings[i], temp);
5356 }
5357
5358 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5359 ARRAY_SIZE(binder_objstat_strings));
5360 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5361 ARRAY_SIZE(stats->obj_deleted));
5362 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5363 int created = atomic_read(&stats->obj_created[i]);
5364 int deleted = atomic_read(&stats->obj_deleted[i]);
5365
5366 if (created || deleted)
5367 seq_printf(m, "%s%s: active %d total %d\n",
5368 prefix,
5369 binder_objstat_strings[i],
5370 created - deleted,
5371 created);
5372 }
5373}
5374
5375static void print_binder_proc_stats(struct seq_file *m,
5376 struct binder_proc *proc)
5377{
5378 struct binder_work *w;
5379 struct binder_thread *thread;
5380 struct rb_node *n;
5381 int count, strong, weak, ready_threads;
5382 size_t free_async_space =
5383 binder_alloc_get_free_async_space(&proc->alloc);
5384
5385 seq_printf(m, "proc %d\n", proc->pid);
5386 seq_printf(m, "context %s\n", proc->context->name);
5387 count = 0;
5388 ready_threads = 0;
5389 binder_inner_proc_lock(proc);
5390 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5391 count++;
5392
5393 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5394 ready_threads++;
5395
5396 seq_printf(m, " threads: %d\n", count);
5397 seq_printf(m, " requested threads: %d+%d/%d\n"
5398 " ready threads %d\n"
5399 " free async space %zd\n", proc->requested_threads,
5400 proc->requested_threads_started, proc->max_threads,
5401 ready_threads,
5402 free_async_space);
5403 count = 0;
5404 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5405 count++;
5406 binder_inner_proc_unlock(proc);
5407 seq_printf(m, " nodes: %d\n", count);
5408 count = 0;
5409 strong = 0;
5410 weak = 0;
5411 binder_proc_lock(proc);
5412 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5413 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5414 rb_node_desc);
5415 count++;
5416 strong += ref->data.strong;
5417 weak += ref->data.weak;
5418 }
5419 binder_proc_unlock(proc);
5420 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5421
5422 count = binder_alloc_get_allocated_count(&proc->alloc);
5423 seq_printf(m, " buffers: %d\n", count);
5424
5425 binder_alloc_print_pages(m, &proc->alloc);
5426
5427 count = 0;
5428 binder_inner_proc_lock(proc);
5429 list_for_each_entry(w, &proc->todo, entry) {
5430 if (w->type == BINDER_WORK_TRANSACTION)
5431 count++;
5432 }
5433 binder_inner_proc_unlock(proc);
5434 seq_printf(m, " pending transactions: %d\n", count);
5435
5436 print_binder_stats(m, " ", &proc->stats);
5437}
5438
5439
5440static int binder_state_show(struct seq_file *m, void *unused)
5441{
5442 struct binder_proc *proc;
5443 struct binder_node *node;
5444 struct binder_node *last_node = NULL;
5445
5446 seq_puts(m, "binder state:\n");
5447
5448 spin_lock(&binder_dead_nodes_lock);
5449 if (!hlist_empty(&binder_dead_nodes))
5450 seq_puts(m, "dead nodes:\n");
5451 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5452
5453
5454
5455
5456
5457 node->tmp_refs++;
5458 spin_unlock(&binder_dead_nodes_lock);
5459 if (last_node)
5460 binder_put_node(last_node);
5461 binder_node_lock(node);
5462 print_binder_node_nilocked(m, node);
5463 binder_node_unlock(node);
5464 last_node = node;
5465 spin_lock(&binder_dead_nodes_lock);
5466 }
5467 spin_unlock(&binder_dead_nodes_lock);
5468 if (last_node)
5469 binder_put_node(last_node);
5470
5471 mutex_lock(&binder_procs_lock);
5472 hlist_for_each_entry(proc, &binder_procs, proc_node)
5473 print_binder_proc(m, proc, 1);
5474 mutex_unlock(&binder_procs_lock);
5475
5476 return 0;
5477}
5478
5479static int binder_stats_show(struct seq_file *m, void *unused)
5480{
5481 struct binder_proc *proc;
5482
5483 seq_puts(m, "binder stats:\n");
5484
5485 print_binder_stats(m, "", &binder_stats);
5486
5487 mutex_lock(&binder_procs_lock);
5488 hlist_for_each_entry(proc, &binder_procs, proc_node)
5489 print_binder_proc_stats(m, proc);
5490 mutex_unlock(&binder_procs_lock);
5491
5492 return 0;
5493}
5494
5495static int binder_transactions_show(struct seq_file *m, void *unused)
5496{
5497 struct binder_proc *proc;
5498
5499 seq_puts(m, "binder transactions:\n");
5500 mutex_lock(&binder_procs_lock);
5501 hlist_for_each_entry(proc, &binder_procs, proc_node)
5502 print_binder_proc(m, proc, 0);
5503 mutex_unlock(&binder_procs_lock);
5504
5505 return 0;
5506}
5507
5508static int binder_proc_show(struct seq_file *m, void *unused)
5509{
5510 struct binder_proc *itr;
5511 int pid = (unsigned long)m->private;
5512
5513 mutex_lock(&binder_procs_lock);
5514 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5515 if (itr->pid == pid) {
5516 seq_puts(m, "binder proc state:\n");
5517 print_binder_proc(m, itr, 1);
5518 }
5519 }
5520 mutex_unlock(&binder_procs_lock);
5521
5522 return 0;
5523}
5524
5525static void print_binder_transaction_log_entry(struct seq_file *m,
5526 struct binder_transaction_log_entry *e)
5527{
5528 int debug_id = READ_ONCE(e->debug_id_done);
5529
5530
5531
5532
5533 smp_rmb();
5534 seq_printf(m,
5535 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5536 e->debug_id, (e->call_type == 2) ? "reply" :
5537 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5538 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5539 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5540 e->return_error, e->return_error_param,
5541 e->return_error_line);
5542
5543
5544
5545
5546 smp_rmb();
5547 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5548 "\n" : " (incomplete)\n");
5549}
5550
5551static int binder_transaction_log_show(struct seq_file *m, void *unused)
5552{
5553 struct binder_transaction_log *log = m->private;
5554 unsigned int log_cur = atomic_read(&log->cur);
5555 unsigned int count;
5556 unsigned int cur;
5557 int i;
5558
5559 count = log_cur + 1;
5560 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5561 0 : count % ARRAY_SIZE(log->entry);
5562 if (count > ARRAY_SIZE(log->entry) || log->full)
5563 count = ARRAY_SIZE(log->entry);
5564 for (i = 0; i < count; i++) {
5565 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5566
5567 print_binder_transaction_log_entry(m, &log->entry[index]);
5568 }
5569 return 0;
5570}
5571
5572static const struct file_operations binder_fops = {
5573 .owner = THIS_MODULE,
5574 .poll = binder_poll,
5575 .unlocked_ioctl = binder_ioctl,
5576 .compat_ioctl = compat_ptr_ioctl,
5577 .mmap = binder_mmap,
5578 .open = binder_open,
5579 .flush = binder_flush,
5580 .release = binder_release,
5581};
5582
5583BINDER_DEBUG_ENTRY(state);
5584BINDER_DEBUG_ENTRY(stats);
5585BINDER_DEBUG_ENTRY(transactions);
5586BINDER_DEBUG_ENTRY(transaction_log);
5587
5588static int __init init_binder_device(const char *name)
5589{
5590 int ret;
5591 struct binder_device *binder_device;
5592
5593 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5594 if (!binder_device)
5595 return -ENOMEM;
5596
5597 binder_device->miscdev.fops = &binder_fops;
5598 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5599 binder_device->miscdev.name = name;
5600
5601 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5602 binder_device->context.name = name;
5603 mutex_init(&binder_device->context.context_mgr_node_lock);
5604
5605 ret = misc_register(&binder_device->miscdev);
5606 if (ret < 0) {
5607 kfree(binder_device);
5608 return ret;
5609 }
5610
5611 hlist_add_head(&binder_device->hlist, &binder_devices);
5612
5613 return ret;
5614}
5615
5616static int __init binder_init(void)
5617{
5618 int ret;
5619 char *device_name, *device_names, *device_tmp;
5620 struct binder_device *device;
5621 struct hlist_node *tmp;
5622
5623 ret = binder_alloc_shrinker_init();
5624 if (ret)
5625 return ret;
5626
5627 atomic_set(&binder_transaction_log.cur, ~0U);
5628 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5629
5630 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5631 if (binder_debugfs_dir_entry_root)
5632 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5633 binder_debugfs_dir_entry_root);
5634
5635 if (binder_debugfs_dir_entry_root) {
5636 debugfs_create_file("state",
5637 0444,
5638 binder_debugfs_dir_entry_root,
5639 NULL,
5640 &binder_state_fops);
5641 debugfs_create_file("stats",
5642 0444,
5643 binder_debugfs_dir_entry_root,
5644 NULL,
5645 &binder_stats_fops);
5646 debugfs_create_file("transactions",
5647 0444,
5648 binder_debugfs_dir_entry_root,
5649 NULL,
5650 &binder_transactions_fops);
5651 debugfs_create_file("transaction_log",
5652 0444,
5653 binder_debugfs_dir_entry_root,
5654 &binder_transaction_log,
5655 &binder_transaction_log_fops);
5656 debugfs_create_file("failed_transaction_log",
5657 0444,
5658 binder_debugfs_dir_entry_root,
5659 &binder_transaction_log_failed,
5660 &binder_transaction_log_fops);
5661 }
5662
5663
5664
5665
5666
5667 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5668 if (!device_names) {
5669 ret = -ENOMEM;
5670 goto err_alloc_device_names_failed;
5671 }
5672 strcpy(device_names, binder_devices_param);
5673
5674 device_tmp = device_names;
5675 while ((device_name = strsep(&device_tmp, ","))) {
5676 ret = init_binder_device(device_name);
5677 if (ret)
5678 goto err_init_binder_device_failed;
5679 }
5680
5681 return ret;
5682
5683err_init_binder_device_failed:
5684 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5685 misc_deregister(&device->miscdev);
5686 hlist_del(&device->hlist);
5687 kfree(device);
5688 }
5689
5690 kfree(device_names);
5691
5692err_alloc_device_names_failed:
5693 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5694
5695 return ret;
5696}
5697
5698device_initcall(binder_init);
5699
5700#define CREATE_TRACE_POINTS
5701#include "binder_trace.h"
5702
5703MODULE_LICENSE("GPL v2");
5704