/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL; also protected by @lock
 *                        while the node is being torn down)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 * @pending_strong_ref:   userspace has acked notification of strong ref
 * @has_weak_ref:         userspace notified of weak ref
 * @pending_weak_ref:     userspace has acked notification of weak ref
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref_olocked(),
 *               a non-NULL @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is valid, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

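/*
 * Allocate a file descriptor in the target process, bounded by its
 * RLIMIT_NOFILE. Returns the new fd, -ESRCH if the proc's files_struct
 * is gone, or -EMFILE if the task's sighand cannot be locked.
 */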
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

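/*
 * Check whether @thread has pending work: its own todo list needs
 * processing, the looper must return to userspace, or (when
 * @do_proc_work) the proc-wide todo list is non-empty.
 * Requires proc->inner_lock to be held.
 */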
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

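/*
 * Set the nice value of the current task, capping it at the maximum
 * allowed by RLIMIT_NICE when the requested value is not permitted.
 */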
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

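/*
 * Look up the node for @ptr in @proc->nodes. On success an implicit
 * temporary reference is taken; the caller must balance it with
 * binder_put_node(). Requires proc->inner_lock to be held.
 */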
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

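/*
 * Allocate and initialize a node for @proc describing the object in
 * @fp. If another thread raced and already inserted a node for the
 * same ptr, the existing node is returned and the new allocation is
 * freed. Returns NULL on allocation failure.
 */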
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

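/*
 * Increment the strong/weak refcount on @node, queueing node work on
 * @target_list when userspace must be told about the first reference.
 * Requires node->lock and, if the node is attached to a process,
 * proc->inner_lock (hence the _nilocked suffix).
 */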
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by a BINDER_WORK_TRANSACTION_COMPLETE
			 * on the same list, which does set the flag and wake
			 * up the thread.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

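/*
 * Decrement the strong/weak refcount on @node.
 *
 * Return: true if the node is now unreferenced and has been unhooked
 * from its rb tree or the dead-nodes list; the caller must then free
 * it with binder_free_node(). Requires node->lock and proc->inner_lock.
 */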
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are
	 * 0 and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

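/*
 * Look up the ref with descriptor @desc in @proc->refs_by_desc. If
 * @need_strong_ref is true, a ref with no strong count is treated as
 * a userspace error and NULL is returned. Requires proc->outer_lock.
 */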
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

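/*
 * Pop @t off @target_thread's transaction stack and clear t->from so
 * the sending thread can no longer be dereferenced through the
 * transaction. Requires target_thread->proc->inner_lock.
 */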
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

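/*
 * Deliver @error_code to the sender of failed transaction @t. Walks
 * up the from_parent chain so that, when the immediate sender is
 * already dead, the error is reported to the next live caller in the
 * stack; the transaction itself is freed at each step.
 */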
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header there */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (buffer->data_size < sizeof(*hdr) ||
	    offset > buffer->data_size - sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			binder_buffer containing the object
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset+size of *C)
 *
 * Example of what is not allowed (rearranging D and E):
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     E (parent = A, offset = 32) // min_offset is 16
 *     D (parent = C, offset = 0)  // D is not a child of the last
 *                                 // verified object or its parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * @buffer must be the last verified object or one of
		 * its parents; walk up the parent chain from the last
		 * verified object until we find it (or run out of
		 * parents).
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

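/*
 * Undo the effects of a translated transaction buffer: release node
 * and ref counts and close installed fds for every object up to
 * @failed_at (or the whole buffer when @failed_at is NULL). Used both
 * on transaction failure and when a buffer is freed.
 */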
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

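/*
 * Translate a BINDER/WEAK_BINDER object in the sender's address space
 * into a HANDLE/WEAK_HANDLE for the target process, creating the node
 * and/or the target-side ref as needed. Returns 0 or a negative errno.
 */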
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

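/*
 * Translate a HANDLE/WEAK_HANDLE object for the target: if the handle
 * refers to a node owned by the target process it is converted back
 * to a (WEAK_)BINDER object, otherwise a new ref is created in the
 * target and the handle is rewritten to the target's descriptor.
 */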
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

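/*
 * Duplicate @fd from the sending process into the target process,
 * subject to the target accepting fds and the security policy
 * permitting the transfer. Returns the target-side fd (installed
 * with O_CLOEXEC) or a negative errno.
 */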
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

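/*
 * Translate every fd in a BINDER_TYPE_FDA object after validating
 * that the array fits inside its parent buffer and is properly
 * aligned. On failure, any fds already installed in the target are
 * closed again before returning the error.
 */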
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

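/*
 * Patch the parent buffer of a BINDER_TYPE_PTR object so that it
 * points at the copy of the child buffer in the target's address
 * space. Validates the parent offset and the fixup ordering first.
 */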
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631static bool binder_proc_transaction(struct binder_transaction *t,
2632 struct binder_proc *proc,
2633 struct binder_thread *thread)
2634{
2635 struct binder_node *node = t->buffer->target_node;
2636 bool oneway = !!(t->flags & TF_ONE_WAY);
2637 bool pending_async = false;
2638
2639 BUG_ON(!node);
2640 binder_node_lock(node);
2641 if (oneway) {
2642 BUG_ON(thread);
2643 if (node->has_async_transaction) {
2644 pending_async = true;
2645 } else {
2646 node->has_async_transaction = true;
2647 }
2648 }
2649
2650 binder_inner_proc_lock(proc);
2651
2652 if (proc->is_dead || (thread && thread->is_dead)) {
2653 binder_inner_proc_unlock(proc);
2654 binder_node_unlock(node);
2655 return false;
2656 }
2657
2658 if (!thread && !pending_async)
2659 thread = binder_select_thread_ilocked(proc);
2660
2661 if (thread)
2662 binder_enqueue_thread_work_ilocked(thread, &t->work);
2663 else if (!pending_async)
2664 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2665 else
2666 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2667
2668 if (!pending_async)
2669 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2670
2671 binder_inner_proc_unlock(proc);
2672 binder_node_unlock(node);
2673
2674 return true;
2675}
2676
2677 /**
2678 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2679 * @node: struct binder_node for which to get refs
2680 * @procp: returns @node->proc if valid
2681 * @error: if no @procp then returns BR_DEAD_REPLY
2682 *
2683 * User-space normally keeps the node alive when creating a transaction
2684 * since it has a reference to the target. The local strong ref keeps it
2685 * alive if the sending process dies before the target process processes
2686 * the transaction. If the source process is malicious or has a reference
2687 * counting bug, relying on the local strong ref can fail.
2688 *
2689 * Since user-space can cause the local strong ref to go away, we also
2690 * take a tmpref on the node to ensure it survives while we are
2691 * constructing the transaction. We also need a tmpref on the proc while
2692 * we are constructing the transaction, so we take that here as well.
2693 *
2694 * Return: the target_node with refs taken, or NULL if the node is dead;
2695 * sets *procp if valid. If @node->proc is NULL, indicating that the
2696 * target proc has died, *error is set to BR_DEAD_REPLY.
2697 */
2698static struct binder_node *binder_get_node_refs_for_txn(
2699 struct binder_node *node,
2700 struct binder_proc **procp,
2701 uint32_t *error)
2702{
2703 struct binder_node *target_node = NULL;
2704
2705 binder_node_inner_lock(node);
2706 if (node->proc) {
2707 target_node = node;
2708 binder_inc_node_nilocked(node, 1, 0, NULL);
2709 binder_inc_node_tmpref_ilocked(node);
2710 node->proc->tmp_ref++;
2711 *procp = node->proc;
2712 } else
2713 *error = BR_DEAD_REPLY;
2714 binder_node_inner_unlock(node);
2715
2716 return target_node;
2717}
2718
2719static void binder_transaction(struct binder_proc *proc,
2720 struct binder_thread *thread,
2721 struct binder_transaction_data *tr, int reply,
2722 binder_size_t extra_buffers_size)
2723{
2724 int ret;
2725 struct binder_transaction *t;
2726 struct binder_work *tcomplete;
2727 binder_size_t *offp, *off_end, *off_start;
2728 binder_size_t off_min;
2729 u8 *sg_bufp, *sg_buf_end;
2730 struct binder_proc *target_proc = NULL;
2731 struct binder_thread *target_thread = NULL;
2732 struct binder_node *target_node = NULL;
2733 struct binder_transaction *in_reply_to = NULL;
2734 struct binder_transaction_log_entry *e;
2735 uint32_t return_error = 0;
2736 uint32_t return_error_param = 0;
2737 uint32_t return_error_line = 0;
2738 struct binder_buffer_object *last_fixup_obj = NULL;
2739 binder_size_t last_fixup_min_off = 0;
2740 struct binder_context *context = proc->context;
2741 int t_debug_id = atomic_inc_return(&binder_last_id);
2742
2743 e = binder_transaction_log_add(&binder_transaction_log);
2744 e->debug_id = t_debug_id;
2745 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2746 e->from_proc = proc->pid;
2747 e->from_thread = thread->pid;
2748 e->target_handle = tr->target.handle;
2749 e->data_size = tr->data_size;
2750 e->offsets_size = tr->offsets_size;
2751 e->context_name = proc->context->name;
2752
2753 if (reply) {
2754 binder_inner_proc_lock(proc);
2755 in_reply_to = thread->transaction_stack;
2756 if (in_reply_to == NULL) {
2757 binder_inner_proc_unlock(proc);
2758 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2759 proc->pid, thread->pid);
2760 return_error = BR_FAILED_REPLY;
2761 return_error_param = -EPROTO;
2762 return_error_line = __LINE__;
2763 goto err_empty_call_stack;
2764 }
2765 if (in_reply_to->to_thread != thread) {
2766 spin_lock(&in_reply_to->lock);
2767 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2768 proc->pid, thread->pid, in_reply_to->debug_id,
2769 in_reply_to->to_proc ?
2770 in_reply_to->to_proc->pid : 0,
2771 in_reply_to->to_thread ?
2772 in_reply_to->to_thread->pid : 0);
2773 spin_unlock(&in_reply_to->lock);
2774 binder_inner_proc_unlock(proc);
2775 return_error = BR_FAILED_REPLY;
2776 return_error_param = -EPROTO;
2777 return_error_line = __LINE__;
2778 in_reply_to = NULL;
2779 goto err_bad_call_stack;
2780 }
2781 thread->transaction_stack = in_reply_to->to_parent;
2782 binder_inner_proc_unlock(proc);
2783 binder_set_nice(in_reply_to->saved_priority);
2784 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2785 if (target_thread == NULL) {
2786 return_error = BR_DEAD_REPLY;
2787 return_error_line = __LINE__;
2788 goto err_dead_binder;
2789 }
2790 if (target_thread->transaction_stack != in_reply_to) {
2791 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2792 proc->pid, thread->pid,
2793 target_thread->transaction_stack ?
2794 target_thread->transaction_stack->debug_id : 0,
2795 in_reply_to->debug_id);
2796 binder_inner_proc_unlock(target_thread->proc);
2797 return_error = BR_FAILED_REPLY;
2798 return_error_param = -EPROTO;
2799 return_error_line = __LINE__;
2800 in_reply_to = NULL;
2801 target_thread = NULL;
2802 goto err_dead_binder;
2803 }
2804 target_proc = target_thread->proc;
2805 target_proc->tmp_ref++;
2806 binder_inner_proc_unlock(target_thread->proc);
2807 } else {
2808 if (tr->target.handle) {
2809 struct binder_ref *ref;
2810
2811 /*
2812 * There must already be a strong ref
2813 * on this node. If so, do a strong
2814 * increment on the node to ensure it
2815 * stays alive until the transaction is
2816 * done.
2817 */
2818 binder_proc_lock(proc);
2819 ref = binder_get_ref_olocked(proc, tr->target.handle,
2820 true);
2821 if (ref) {
2822 target_node = binder_get_node_refs_for_txn(
2823 ref->node, &target_proc,
2824 &return_error);
2825 } else {
2826 binder_user_error("%d:%d got transaction to invalid handle\n",
2827 proc->pid, thread->pid);
2828 return_error = BR_FAILED_REPLY;
2829 }
2830 binder_proc_unlock(proc);
2831 } else {
2832 mutex_lock(&context->context_mgr_node_lock);
2833 target_node = context->binder_context_mgr_node;
2834 if (target_node)
2835 target_node = binder_get_node_refs_for_txn(
2836 target_node, &target_proc,
2837 &return_error);
2838 else
2839 return_error = BR_DEAD_REPLY;
2840 mutex_unlock(&context->context_mgr_node_lock);
2841 if (target_node && target_proc == proc) {
2842 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2843 proc->pid, thread->pid);
2844 return_error = BR_FAILED_REPLY;
2845 return_error_param = -EINVAL;
2846 return_error_line = __LINE__;
2847 goto err_invalid_target_handle;
2848 }
2849 }
2850 if (!target_node) {
2851 /*
2852 * return_error is set above
2853 */
2854 return_error_param = -EINVAL;
2855 return_error_line = __LINE__;
2856 goto err_dead_binder;
2857 }
2858 e->to_node = target_node->debug_id;
2859 if (security_binder_transaction(proc->tsk,
2860 target_proc->tsk) < 0) {
2861 return_error = BR_FAILED_REPLY;
2862 return_error_param = -EPERM;
2863 return_error_line = __LINE__;
2864 goto err_invalid_target_handle;
2865 }
2866 binder_inner_proc_lock(proc);
2867 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2868 struct binder_transaction *tmp;
2869
2870 tmp = thread->transaction_stack;
2871 if (tmp->to_thread != thread) {
2872 spin_lock(&tmp->lock);
2873 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2874 proc->pid, thread->pid, tmp->debug_id,
2875 tmp->to_proc ? tmp->to_proc->pid : 0,
2876 tmp->to_thread ?
2877 tmp->to_thread->pid : 0);
2878 spin_unlock(&tmp->lock);
2879 binder_inner_proc_unlock(proc);
2880 return_error = BR_FAILED_REPLY;
2881 return_error_param = -EPROTO;
2882 return_error_line = __LINE__;
2883 goto err_bad_call_stack;
2884 }
2885 while (tmp) {
2886 struct binder_thread *from;
2887
2888 spin_lock(&tmp->lock);
2889 from = tmp->from;
2890 if (from && from->proc == target_proc) {
2891 atomic_inc(&from->tmp_ref);
2892 target_thread = from;
2893 spin_unlock(&tmp->lock);
2894 break;
2895 }
2896 spin_unlock(&tmp->lock);
2897 tmp = tmp->from_parent;
2898 }
2899 }
2900 binder_inner_proc_unlock(proc);
2901 }
2902 if (target_thread)
2903 e->to_thread = target_thread->pid;
2904 e->to_proc = target_proc->pid;
2905
2906 /* TODO: reuse incoming transaction for reply */
2907 t = kzalloc(sizeof(*t), GFP_KERNEL);
2908 if (t == NULL) {
2909 return_error = BR_FAILED_REPLY;
2910 return_error_param = -ENOMEM;
2911 return_error_line = __LINE__;
2912 goto err_alloc_t_failed;
2913 }
2914 binder_stats_created(BINDER_STAT_TRANSACTION);
2915 spin_lock_init(&t->lock);
2916
2917 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2918 if (tcomplete == NULL) {
2919 return_error = BR_FAILED_REPLY;
2920 return_error_param = -ENOMEM;
2921 return_error_line = __LINE__;
2922 goto err_alloc_tcomplete_failed;
2923 }
2924 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2925
2926 t->debug_id = t_debug_id;
2927
2928 if (reply)
2929 binder_debug(BINDER_DEBUG_TRANSACTION,
2930 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2931 proc->pid, thread->pid, t->debug_id,
2932 target_proc->pid, target_thread->pid,
2933 (u64)tr->data.ptr.buffer,
2934 (u64)tr->data.ptr.offsets,
2935 (u64)tr->data_size, (u64)tr->offsets_size,
2936 (u64)extra_buffers_size);
2937 else
2938 binder_debug(BINDER_DEBUG_TRANSACTION,
2939 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2940 proc->pid, thread->pid, t->debug_id,
2941 target_proc->pid, target_node->debug_id,
2942 (u64)tr->data.ptr.buffer,
2943 (u64)tr->data.ptr.offsets,
2944 (u64)tr->data_size, (u64)tr->offsets_size,
2945 (u64)extra_buffers_size);
2946
2947 if (!reply && !(tr->flags & TF_ONE_WAY))
2948 t->from = thread;
2949 else
2950 t->from = NULL;
2951 t->sender_euid = task_euid(proc->tsk);
2952 t->to_proc = target_proc;
2953 t->to_thread = target_thread;
2954 t->code = tr->code;
2955 t->flags = tr->flags;
2956 t->priority = task_nice(current);
2957
2958 trace_binder_transaction(reply, t, target_node);
2959
2960 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2961 tr->offsets_size, extra_buffers_size,
2962 !reply && (t->flags & TF_ONE_WAY));
2963 if (IS_ERR(t->buffer)) {
2964 /*
2965 * -ESRCH indicates VMA cleared. The target is dying.
2966 */
2967 return_error_param = PTR_ERR(t->buffer);
2968 return_error = return_error_param == -ESRCH ?
2969 BR_DEAD_REPLY : BR_FAILED_REPLY;
2970 return_error_line = __LINE__;
2971 t->buffer = NULL;
2972 goto err_binder_alloc_buf_failed;
2973 }
2974 t->buffer->allow_user_free = 0;
2975 t->buffer->debug_id = t->debug_id;
2976 t->buffer->transaction = t;
2977 t->buffer->target_node = target_node;
2978 trace_binder_transaction_alloc_buf(t->buffer);
2979 off_start = (binder_size_t *)(t->buffer->data +
2980 ALIGN(tr->data_size, sizeof(void *)));
2981 offp = off_start;
2982
2983 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2984 tr->data.ptr.buffer, tr->data_size)) {
2985 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2986 proc->pid, thread->pid);
2987 return_error = BR_FAILED_REPLY;
2988 return_error_param = -EFAULT;
2989 return_error_line = __LINE__;
2990 goto err_copy_data_failed;
2991 }
2992 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2993 tr->data.ptr.offsets, tr->offsets_size)) {
2994 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2995 proc->pid, thread->pid);
2996 return_error = BR_FAILED_REPLY;
2997 return_error_param = -EFAULT;
2998 return_error_line = __LINE__;
2999 goto err_copy_data_failed;
3000 }
3001 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3002 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3003 proc->pid, thread->pid, (u64)tr->offsets_size);
3004 return_error = BR_FAILED_REPLY;
3005 return_error_param = -EINVAL;
3006 return_error_line = __LINE__;
3007 goto err_bad_offset;
3008 }
3009 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3010 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3011 proc->pid, thread->pid,
3012 (u64)extra_buffers_size);
3013 return_error = BR_FAILED_REPLY;
3014 return_error_param = -EINVAL;
3015 return_error_line = __LINE__;
3016 goto err_bad_offset;
3017 }
3018 off_end = (void *)off_start + tr->offsets_size;
3019 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3020 sg_buf_end = sg_bufp + extra_buffers_size;
3021 off_min = 0;
3022 for (; offp < off_end; offp++) {
3023 struct binder_object_header *hdr;
3024 size_t object_size = binder_validate_object(t->buffer, *offp);
3025
3026 if (object_size == 0 || *offp < off_min) {
3027 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3028 proc->pid, thread->pid, (u64)*offp,
3029 (u64)off_min,
3030 (u64)t->buffer->data_size);
3031 return_error = BR_FAILED_REPLY;
3032 return_error_param = -EINVAL;
3033 return_error_line = __LINE__;
3034 goto err_bad_offset;
3035 }
3036
3037 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3038 off_min = *offp + object_size;
3039 switch (hdr->type) {
3040 case BINDER_TYPE_BINDER:
3041 case BINDER_TYPE_WEAK_BINDER: {
3042 struct flat_binder_object *fp;
3043
3044 fp = to_flat_binder_object(hdr);
3045 ret = binder_translate_binder(fp, t, thread);
3046 if (ret < 0) {
3047 return_error = BR_FAILED_REPLY;
3048 return_error_param = ret;
3049 return_error_line = __LINE__;
3050 goto err_translate_failed;
3051 }
3052 } break;
3053 case BINDER_TYPE_HANDLE:
3054 case BINDER_TYPE_WEAK_HANDLE: {
3055 struct flat_binder_object *fp;
3056
3057 fp = to_flat_binder_object(hdr);
3058 ret = binder_translate_handle(fp, t, thread);
3059 if (ret < 0) {
3060 return_error = BR_FAILED_REPLY;
3061 return_error_param = ret;
3062 return_error_line = __LINE__;
3063 goto err_translate_failed;
3064 }
3065 } break;
3066
3067 case BINDER_TYPE_FD: {
3068 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3069 int target_fd = binder_translate_fd(fp->fd, t, thread,
3070 in_reply_to);
3071
3072 if (target_fd < 0) {
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = target_fd;
3075 return_error_line = __LINE__;
3076 goto err_translate_failed;
3077 }
3078 fp->pad_binder = 0;
3079 fp->fd = target_fd;
3080 } break;
3081 case BINDER_TYPE_FDA: {
3082 struct binder_fd_array_object *fda =
3083 to_binder_fd_array_object(hdr);
3084 struct binder_buffer_object *parent =
3085 binder_validate_ptr(t->buffer, fda->parent,
3086 off_start,
3087 offp - off_start);
3088 if (!parent) {
3089 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3090 proc->pid, thread->pid);
3091 return_error = BR_FAILED_REPLY;
3092 return_error_param = -EINVAL;
3093 return_error_line = __LINE__;
3094 goto err_bad_parent;
3095 }
3096 if (!binder_validate_fixup(t->buffer, off_start,
3097 parent, fda->parent_offset,
3098 last_fixup_obj,
3099 last_fixup_min_off)) {
3100 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3101 proc->pid, thread->pid);
3102 return_error = BR_FAILED_REPLY;
3103 return_error_param = -EINVAL;
3104 return_error_line = __LINE__;
3105 goto err_bad_parent;
3106 }
3107 ret = binder_translate_fd_array(fda, parent, t, thread,
3108 in_reply_to);
3109 if (ret < 0) {
3110 return_error = BR_FAILED_REPLY;
3111 return_error_param = ret;
3112 return_error_line = __LINE__;
3113 goto err_translate_failed;
3114 }
3115 last_fixup_obj = parent;
3116 last_fixup_min_off =
3117 fda->parent_offset + sizeof(u32) * fda->num_fds;
3118 } break;
3119 case BINDER_TYPE_PTR: {
3120 struct binder_buffer_object *bp =
3121 to_binder_buffer_object(hdr);
3122 size_t buf_left = sg_buf_end - sg_bufp;
3123
3124 if (bp->length > buf_left) {
3125 binder_user_error("%d:%d got transaction with too large buffer\n",
3126 proc->pid, thread->pid);
3127 return_error = BR_FAILED_REPLY;
3128 return_error_param = -EINVAL;
3129 return_error_line = __LINE__;
3130 goto err_bad_offset;
3131 }
3132 if (copy_from_user(sg_bufp,
3133 (const void __user *)(uintptr_t)
3134 bp->buffer, bp->length)) {
3135 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3136 proc->pid, thread->pid);
3137 return_error_param = -EFAULT;
3138 return_error = BR_FAILED_REPLY;
3139 return_error_line = __LINE__;
3140 goto err_copy_data_failed;
3141 }
3142
3143 bp->buffer = (uintptr_t)sg_bufp +
3144 binder_alloc_get_user_buffer_offset(
3145 &target_proc->alloc);
3146 sg_bufp += ALIGN(bp->length, sizeof(u64));
3147
3148 ret = binder_fixup_parent(t, thread, bp, off_start,
3149 offp - off_start,
3150 last_fixup_obj,
3151 last_fixup_min_off);
3152 if (ret < 0) {
3153 return_error = BR_FAILED_REPLY;
3154 return_error_param = ret;
3155 return_error_line = __LINE__;
3156 goto err_translate_failed;
3157 }
3158 last_fixup_obj = bp;
3159 last_fixup_min_off = 0;
3160 } break;
3161 default:
3162 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3163 proc->pid, thread->pid, hdr->type);
3164 return_error = BR_FAILED_REPLY;
3165 return_error_param = -EINVAL;
3166 return_error_line = __LINE__;
3167 goto err_bad_object_type;
3168 }
3169 }
3170 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3171 t->work.type = BINDER_WORK_TRANSACTION;
3172
3173 if (reply) {
3174 binder_enqueue_thread_work(thread, tcomplete);
3175 binder_inner_proc_lock(target_proc);
3176 if (target_thread->is_dead) {
3177 binder_inner_proc_unlock(target_proc);
3178 goto err_dead_proc_or_thread;
3179 }
3180 BUG_ON(t->buffer->async_transaction != 0);
3181 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3182 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3183 binder_inner_proc_unlock(target_proc);
3184 wake_up_interruptible_sync(&target_thread->wait);
3185 binder_free_transaction(in_reply_to);
3186 } else if (!(t->flags & TF_ONE_WAY)) {
3187 BUG_ON(t->buffer->async_transaction != 0);
3188 binder_inner_proc_lock(proc);
3189 /*
3190 * Defer the TRANSACTION_COMPLETE, so we don't return to
3191 * userspace immediately; this allows the target process to
3192 * immediately start processing this transaction, reducing
3193 * latency. We will then return the TRANSACTION_COMPLETE when
3194 * the target replies (or there is an error).
3195 */
3196 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3197 t->need_reply = 1;
3198 t->from_parent = thread->transaction_stack;
3199 thread->transaction_stack = t;
3200 binder_inner_proc_unlock(proc);
3201 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3202 binder_inner_proc_lock(proc);
3203 binder_pop_transaction_ilocked(thread, t);
3204 binder_inner_proc_unlock(proc);
3205 goto err_dead_proc_or_thread;
3206 }
3207 } else {
3208 BUG_ON(target_node == NULL);
3209 BUG_ON(t->buffer->async_transaction != 1);
3210 binder_enqueue_thread_work(thread, tcomplete);
3211 if (!binder_proc_transaction(t, target_proc, NULL))
3212 goto err_dead_proc_or_thread;
3213 }
3214 if (target_thread)
3215 binder_thread_dec_tmpref(target_thread);
3216 binder_proc_dec_tmpref(target_proc);
3217 if (target_node)
3218 binder_dec_node_tmpref(target_node);
3219
3220 /*
3221 * write barrier to synchronize with initialization of log entry
3222 */
3223 smp_wmb();
3224 WRITE_ONCE(e->debug_id_done, t_debug_id);
3225 return;
3226
3227err_dead_proc_or_thread:
3228 return_error = BR_DEAD_REPLY;
3229 return_error_line = __LINE__;
3230 binder_dequeue_work(proc, tcomplete);
3231err_translate_failed:
3232err_bad_object_type:
3233err_bad_offset:
3234err_bad_parent:
3235err_copy_data_failed:
3236 trace_binder_transaction_failed_buffer_release(t->buffer);
3237 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3238 if (target_node)
3239 binder_dec_node_tmpref(target_node);
3240 target_node = NULL;
3241 t->buffer->transaction = NULL;
3242 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3243err_binder_alloc_buf_failed:
3244 kfree(tcomplete);
3245 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3246err_alloc_tcomplete_failed:
3247 kfree(t);
3248 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3249err_alloc_t_failed:
3250err_bad_call_stack:
3251err_empty_call_stack:
3252err_dead_binder:
3253err_invalid_target_handle:
3254 if (target_thread)
3255 binder_thread_dec_tmpref(target_thread);
3256 if (target_proc)
3257 binder_proc_dec_tmpref(target_proc);
3258 if (target_node) {
3259 binder_dec_node(target_node, 1, 0);
3260 binder_dec_node_tmpref(target_node);
3261 }
3262
3263 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3264 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3265 proc->pid, thread->pid, return_error, return_error_param,
3266 (u64)tr->data_size, (u64)tr->offsets_size,
3267 return_error_line);
3268
3269 {
3270 struct binder_transaction_log_entry *fe;
3271
3272 e->return_error = return_error;
3273 e->return_error_param = return_error_param;
3274 e->return_error_line = return_error_line;
3275 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3276 *fe = *e;
3277 /*
3278 * write barrier to synchronize with initialization
3279 * of log entry
3280 */
3281 smp_wmb();
3282 WRITE_ONCE(e->debug_id_done, t_debug_id);
3283 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3284 }
3285
3286 BUG_ON(thread->return_error.cmd != BR_OK);
3287 if (in_reply_to) {
3288 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3289 binder_enqueue_thread_work(thread, &thread->return_error.work);
3290 binder_send_failed_reply(in_reply_to, return_error);
3291 } else {
3292 thread->return_error.cmd = return_error;
3293 binder_enqueue_thread_work(thread, &thread->return_error.work);
3294 }
3295}
3296
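/*
 * binder_thread_write() - consume BC_* commands from the user write
 * buffer. Processes commands until the buffer is exhausted or a return
 * error is pending, updating *consumed so userspace can resume after a
 * partial write. Returns 0 on success or a negative errno on a
 * malformed or unsupported command.
 */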
3297static int binder_thread_write(struct binder_proc *proc,
3298 struct binder_thread *thread,
3299 binder_uintptr_t binder_buffer, size_t size,
3300 binder_size_t *consumed)
3301{
3302 uint32_t cmd;
3303 struct binder_context *context = proc->context;
3304 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3305 void __user *ptr = buffer + *consumed;
3306 void __user *end = buffer + size;
3307
3308 while (ptr < end && thread->return_error.cmd == BR_OK) {
3309 int ret;
3310
3311 if (get_user(cmd, (uint32_t __user *)ptr))
3312 return -EFAULT;
3313 ptr += sizeof(uint32_t);
3314 trace_binder_command(cmd);
3315 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3316 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3317 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3318 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3319 }
3320 switch (cmd) {
3321 case BC_INCREFS:
3322 case BC_ACQUIRE:
3323 case BC_RELEASE:
3324 case BC_DECREFS: {
3325 uint32_t target;
3326 const char *debug_string;
3327 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3328 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3329 struct binder_ref_data rdata;
3330
3331 if (get_user(target, (uint32_t __user *)ptr))
3332 return -EFAULT;
3333
3334 ptr += sizeof(uint32_t);
3335 ret = -1;
3336 if (increment && !target) {
3337 struct binder_node *ctx_mgr_node;
3338 mutex_lock(&context->context_mgr_node_lock);
3339 ctx_mgr_node = context->binder_context_mgr_node;
3340 if (ctx_mgr_node)
3341 ret = binder_inc_ref_for_node(
3342 proc, ctx_mgr_node,
3343 strong, NULL, &rdata);
3344 mutex_unlock(&context->context_mgr_node_lock);
3345 }
3346 if (ret)
3347 ret = binder_update_ref_for_handle(
3348 proc, target, increment, strong,
3349 &rdata);
3350 if (!ret && rdata.desc != target) {
3351 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3352 proc->pid, thread->pid,
3353 target, rdata.desc);
3354 }
3355 switch (cmd) {
3356 case BC_INCREFS:
3357 debug_string = "IncRefs";
3358 break;
3359 case BC_ACQUIRE:
3360 debug_string = "Acquire";
3361 break;
3362 case BC_RELEASE:
3363 debug_string = "Release";
3364 break;
3365 case BC_DECREFS:
3366 default:
3367 debug_string = "DecRefs";
3368 break;
3369 }
3370 if (ret) {
3371 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3372 proc->pid, thread->pid, debug_string,
3373 strong, target, ret);
3374 break;
3375 }
3376 binder_debug(BINDER_DEBUG_USER_REFS,
3377 "%d:%d %s ref %d desc %d s %d w %d\n",
3378 proc->pid, thread->pid, debug_string,
3379 rdata.debug_id, rdata.desc, rdata.strong,
3380 rdata.weak);
3381 break;
3382 }
3383 case BC_INCREFS_DONE:
3384 case BC_ACQUIRE_DONE: {
3385 binder_uintptr_t node_ptr;
3386 binder_uintptr_t cookie;
3387 struct binder_node *node;
3388 bool free_node;
3389
3390 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3391 return -EFAULT;
3392 ptr += sizeof(binder_uintptr_t);
3393 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3394 return -EFAULT;
3395 ptr += sizeof(binder_uintptr_t);
3396 node = binder_get_node(proc, node_ptr);
3397 if (node == NULL) {
3398 binder_user_error("%d:%d %s u%016llx no match\n",
3399 proc->pid, thread->pid,
3400 cmd == BC_INCREFS_DONE ?
3401 "BC_INCREFS_DONE" :
3402 "BC_ACQUIRE_DONE",
3403 (u64)node_ptr);
3404 break;
3405 }
3406 if (cookie != node->cookie) {
3407 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3408 proc->pid, thread->pid,
3409 cmd == BC_INCREFS_DONE ?
3410 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3411 (u64)node_ptr, node->debug_id,
3412 (u64)cookie, (u64)node->cookie);
3413 binder_put_node(node);
3414 break;
3415 }
3416 binder_node_inner_lock(node);
3417 if (cmd == BC_ACQUIRE_DONE) {
3418 if (node->pending_strong_ref == 0) {
3419 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3420 proc->pid, thread->pid,
3421 node->debug_id);
3422 binder_node_inner_unlock(node);
3423 binder_put_node(node);
3424 break;
3425 }
3426 node->pending_strong_ref = 0;
3427 } else {
3428 if (node->pending_weak_ref == 0) {
3429 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3430 proc->pid, thread->pid,
3431 node->debug_id);
3432 binder_node_inner_unlock(node);
3433 binder_put_node(node);
3434 break;
3435 }
3436 node->pending_weak_ref = 0;
3437 }
3438 free_node = binder_dec_node_nilocked(node,
3439 cmd == BC_ACQUIRE_DONE, 0);
3440 WARN_ON(free_node);
3441 binder_debug(BINDER_DEBUG_USER_REFS,
3442 "%d:%d %s node %d ls %d lw %d tr %d\n",
3443 proc->pid, thread->pid,
3444 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3445 node->debug_id, node->local_strong_refs,
3446 node->local_weak_refs, node->tmp_refs);
3447 binder_node_inner_unlock(node);
3448 binder_put_node(node);
3449 break;
3450 }
3451 case BC_ATTEMPT_ACQUIRE:
3452 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3453 return -EINVAL;
3454 case BC_ACQUIRE_RESULT:
3455 pr_err("BC_ACQUIRE_RESULT not supported\n");
3456 return -EINVAL;
3457
3458 case BC_FREE_BUFFER: {
3459 binder_uintptr_t data_ptr;
3460 struct binder_buffer *buffer;
3461
3462 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3463 return -EFAULT;
3464 ptr += sizeof(binder_uintptr_t);
3465
3466 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3467 data_ptr);
3468 if (buffer == NULL) {
3469 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3470 proc->pid, thread->pid, (u64)data_ptr);
3471 break;
3472 }
3473 if (!buffer->allow_user_free) {
3474 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3475 proc->pid, thread->pid, (u64)data_ptr);
3476 break;
3477 }
3478 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3479 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3480 proc->pid, thread->pid, (u64)data_ptr,
3481 buffer->debug_id,
3482 buffer->transaction ? "active" : "finished");
3483
3484 if (buffer->transaction) {
3485 buffer->transaction->buffer = NULL;
3486 buffer->transaction = NULL;
3487 }
3488 if (buffer->async_transaction && buffer->target_node) {
3489 struct binder_node *buf_node;
3490 struct binder_work *w;
3491
3492 buf_node = buffer->target_node;
3493 binder_node_inner_lock(buf_node);
3494 BUG_ON(!buf_node->has_async_transaction);
3495 BUG_ON(buf_node->proc != proc);
3496 w = binder_dequeue_work_head_ilocked(
3497 &buf_node->async_todo);
3498 if (!w) {
3499 buf_node->has_async_transaction = false;
3500 } else {
3501 binder_enqueue_work_ilocked(
3502 w, &proc->todo);
3503 binder_wakeup_proc_ilocked(proc);
3504 }
3505 binder_node_inner_unlock(buf_node);
3506 }
3507 trace_binder_transaction_buffer_release(buffer);
3508 binder_transaction_buffer_release(proc, buffer, NULL);
3509 binder_alloc_free_buf(&proc->alloc, buffer);
3510 break;
3511 }
3512
3513 case BC_TRANSACTION_SG:
3514 case BC_REPLY_SG: {
3515 struct binder_transaction_data_sg tr;
3516
3517 if (copy_from_user(&tr, ptr, sizeof(tr)))
3518 return -EFAULT;
3519 ptr += sizeof(tr);
3520 binder_transaction(proc, thread, &tr.transaction_data,
3521 cmd == BC_REPLY_SG, tr.buffers_size);
3522 break;
3523 }
3524 case BC_TRANSACTION:
3525 case BC_REPLY: {
3526 struct binder_transaction_data tr;
3527
3528 if (copy_from_user(&tr, ptr, sizeof(tr)))
3529 return -EFAULT;
3530 ptr += sizeof(tr);
3531 binder_transaction(proc, thread, &tr,
3532 cmd == BC_REPLY, 0);
3533 break;
3534 }
3535
3536 case BC_REGISTER_LOOPER:
3537 binder_debug(BINDER_DEBUG_THREADS,
3538 "%d:%d BC_REGISTER_LOOPER\n",
3539 proc->pid, thread->pid);
3540 binder_inner_proc_lock(proc);
3541 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3542 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3543 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3544 proc->pid, thread->pid);
3545 } else if (proc->requested_threads == 0) {
3546 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3547 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3548 proc->pid, thread->pid);
3549 } else {
3550 proc->requested_threads--;
3551 proc->requested_threads_started++;
3552 }
3553 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3554 binder_inner_proc_unlock(proc);
3555 break;
3556 case BC_ENTER_LOOPER:
3557 binder_debug(BINDER_DEBUG_THREADS,
3558 "%d:%d BC_ENTER_LOOPER\n",
3559 proc->pid, thread->pid);
3560 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3561 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3562 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3563 proc->pid, thread->pid);
3564 }
3565 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3566 break;
3567 case BC_EXIT_LOOPER:
3568 binder_debug(BINDER_DEBUG_THREADS,
3569 "%d:%d BC_EXIT_LOOPER\n",
3570 proc->pid, thread->pid);
3571 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3572 break;
3573
3574 case BC_REQUEST_DEATH_NOTIFICATION:
3575 case BC_CLEAR_DEATH_NOTIFICATION: {
3576 uint32_t target;
3577 binder_uintptr_t cookie;
3578 struct binder_ref *ref;
3579 struct binder_ref_death *death = NULL;
3580
3581 if (get_user(target, (uint32_t __user *)ptr))
3582 return -EFAULT;
3583 ptr += sizeof(uint32_t);
3584 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3585 return -EFAULT;
3586 ptr += sizeof(binder_uintptr_t);
3587 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3588 /*
3589 * Allocate memory for death notification
3590 * before taking the lock
3591 */
3592 death = kzalloc(sizeof(*death), GFP_KERNEL);
3593 if (death == NULL) {
3594 WARN_ON(thread->return_error.cmd !=
3595 BR_OK);
3596 thread->return_error.cmd = BR_ERROR;
3597 binder_enqueue_thread_work(
3598 thread,
3599 &thread->return_error.work);
3600 binder_debug(
3601 BINDER_DEBUG_FAILED_TRANSACTION,
3602 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3603 proc->pid, thread->pid);
3604 break;
3605 }
3606 }
3607 binder_proc_lock(proc);
3608 ref = binder_get_ref_olocked(proc, target, false);
3609 if (ref == NULL) {
3610 binder_user_error("%d:%d %s invalid ref %d\n",
3611 proc->pid, thread->pid,
3612 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3613 "BC_REQUEST_DEATH_NOTIFICATION" :
3614 "BC_CLEAR_DEATH_NOTIFICATION",
3615 target);
3616 binder_proc_unlock(proc);
3617 kfree(death);
3618 break;
3619 }
3620
3621 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3622 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3623 proc->pid, thread->pid,
3624 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3625 "BC_REQUEST_DEATH_NOTIFICATION" :
3626 "BC_CLEAR_DEATH_NOTIFICATION",
3627 (u64)cookie, ref->data.debug_id,
3628 ref->data.desc, ref->data.strong,
3629 ref->data.weak, ref->node->debug_id);
3630
3631 binder_node_lock(ref->node);
3632 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3633 if (ref->death) {
3634 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3635 proc->pid, thread->pid);
3636 binder_node_unlock(ref->node);
3637 binder_proc_unlock(proc);
3638 kfree(death);
3639 break;
3640 }
3641 binder_stats_created(BINDER_STAT_DEATH);
3642 INIT_LIST_HEAD(&death->work.entry);
3643 death->cookie = cookie;
3644 ref->death = death;
3645 if (ref->node->proc == NULL) {
3646 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3647
3648 binder_inner_proc_lock(proc);
3649 binder_enqueue_work_ilocked(
3650 &ref->death->work, &proc->todo);
3651 binder_wakeup_proc_ilocked(proc);
3652 binder_inner_proc_unlock(proc);
3653 }
3654 } else {
3655 if (ref->death == NULL) {
3656 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3657 proc->pid, thread->pid);
3658 binder_node_unlock(ref->node);
3659 binder_proc_unlock(proc);
3660 break;
3661 }
3662 death = ref->death;
3663 if (death->cookie != cookie) {
3664 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3665 proc->pid, thread->pid,
3666 (u64)death->cookie,
3667 (u64)cookie);
3668 binder_node_unlock(ref->node);
3669 binder_proc_unlock(proc);
3670 break;
3671 }
3672 ref->death = NULL;
3673 binder_inner_proc_lock(proc);
3674 if (list_empty(&death->work.entry)) {
3675 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3676 if (thread->looper &
3677 (BINDER_LOOPER_STATE_REGISTERED |
3678 BINDER_LOOPER_STATE_ENTERED))
3679 binder_enqueue_thread_work_ilocked(
3680 thread,
3681 &death->work);
3682 else {
3683 binder_enqueue_work_ilocked(
3684 &death->work,
3685 &proc->todo);
3686 binder_wakeup_proc_ilocked(
3687 proc);
3688 }
3689 } else {
3690 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3691 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3692 }
3693 binder_inner_proc_unlock(proc);
3694 }
3695 binder_node_unlock(ref->node);
3696 binder_proc_unlock(proc);
3697 } break;
3698 case BC_DEAD_BINDER_DONE: {
3699 struct binder_work *w;
3700 binder_uintptr_t cookie;
3701 struct binder_ref_death *death = NULL;
3702
3703 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3704 return -EFAULT;
3705
3706 ptr += sizeof(cookie);
3707 binder_inner_proc_lock(proc);
3708 list_for_each_entry(w, &proc->delivered_death,
3709 entry) {
3710 struct binder_ref_death *tmp_death =
3711 container_of(w,
3712 struct binder_ref_death,
3713 work);
3714
3715 if (tmp_death->cookie == cookie) {
3716 death = tmp_death;
3717 break;
3718 }
3719 }
3720 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3721 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3722 proc->pid, thread->pid, (u64)cookie,
3723 death);
3724 if (death == NULL) {
3725 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3726 proc->pid, thread->pid, (u64)cookie);
3727 binder_inner_proc_unlock(proc);
3728 break;
3729 }
3730 binder_dequeue_work_ilocked(&death->work);
3731 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3732 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3733 if (thread->looper &
3734 (BINDER_LOOPER_STATE_REGISTERED |
3735 BINDER_LOOPER_STATE_ENTERED))
3736 binder_enqueue_thread_work_ilocked(
3737 thread, &death->work);
3738 else {
3739 binder_enqueue_work_ilocked(
3740 &death->work,
3741 &proc->todo);
3742 binder_wakeup_proc_ilocked(proc);
3743 }
3744 }
3745 binder_inner_proc_unlock(proc);
3746 } break;
3747
3748 default:
3749 pr_err("%d:%d unknown command %d\n",
3750 proc->pid, thread->pid, cmd);
3751 return -EINVAL;
3752 }
3753 *consumed = ptr - buffer;
3754 }
3755 return 0;
3756}
3757
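/*
 * binder_stat_br() - account a BR_* return command in the global,
 * per-process and per-thread statistics and emit its trace event.
 */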
3758static void binder_stat_br(struct binder_proc *proc,
3759 struct binder_thread *thread, uint32_t cmd)
3760{
3761 trace_binder_return(cmd);
3762 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3763 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3764 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3765 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3766 }
3767}
3768
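/*
 * binder_put_node_cmd() - write a node refcount command (cmd, node ptr,
 * cookie) to the user read buffer at *ptrp, advancing *ptrp on success.
 * Returns 0 or -EFAULT if the user buffer cannot be written.
 */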
3769static int binder_put_node_cmd(struct binder_proc *proc,
3770 struct binder_thread *thread,
3771 void __user **ptrp,
3772 binder_uintptr_t node_ptr,
3773 binder_uintptr_t node_cookie,
3774 int node_debug_id,
3775 uint32_t cmd, const char *cmd_name)
3776{
3777 void __user *ptr = *ptrp;
3778
3779 if (put_user(cmd, (uint32_t __user *)ptr))
3780 return -EFAULT;
3781 ptr += sizeof(uint32_t);
3782
3783 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3784 return -EFAULT;
3785 ptr += sizeof(binder_uintptr_t);
3786
3787 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3788 return -EFAULT;
3789 ptr += sizeof(binder_uintptr_t);
3790
3791 binder_stat_br(proc, thread, cmd);
3792 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3793 proc->pid, thread->pid, cmd_name, node_debug_id,
3794 (u64)node_ptr, (u64)node_cookie);
3795
3796 *ptrp = ptr;
3797 return 0;
3798}
3799
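/*
 * binder_wait_for_work() - sleep until thread work (or process work, if
 * @do_proc_work) is available. Returns 0 when work arrives or
 * -ERESTARTSYS if the sleep was interrupted by a signal.
 */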
3800static int binder_wait_for_work(struct binder_thread *thread,
3801 bool do_proc_work)
3802{
3803 DEFINE_WAIT(wait);
3804 struct binder_proc *proc = thread->proc;
3805 int ret = 0;
3806
3807 freezer_do_not_count();
3808 binder_inner_proc_lock(proc);
3809 for (;;) {
3810 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3811 if (binder_has_work_ilocked(thread, do_proc_work))
3812 break;
3813 if (do_proc_work)
3814 list_add(&thread->waiting_thread_node,
3815 &proc->waiting_threads);
3816 binder_inner_proc_unlock(proc);
3817 schedule();
3818 binder_inner_proc_lock(proc);
3819 list_del_init(&thread->waiting_thread_node);
3820 if (signal_pending(current)) {
3821 ret = -ERESTARTSYS;
3822 break;
3823 }
3824 }
3825 finish_wait(&thread->wait, &wait);
3826 binder_inner_proc_unlock(proc);
3827 freezer_count();
3828
3829 return ret;
3830}
3831
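/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * Blocks until work is available (unless @non_block), then drains the
 * thread todo list, and the process todo list when allowed, into the
 * user buffer. May append BR_SPAWN_LOOPER to ask userspace to start
 * another looper thread.
 */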
3832static int binder_thread_read(struct binder_proc *proc,
3833 struct binder_thread *thread,
3834 binder_uintptr_t binder_buffer, size_t size,
3835 binder_size_t *consumed, int non_block)
3836{
3837 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3838 void __user *ptr = buffer + *consumed;
3839 void __user *end = buffer + size;
3840
3841 int ret = 0;
3842 int wait_for_proc_work;
3843
3844 if (*consumed == 0) {
3845 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3846 return -EFAULT;
3847 ptr += sizeof(uint32_t);
3848 }
3849
3850retry:
3851 binder_inner_proc_lock(proc);
3852 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3853 binder_inner_proc_unlock(proc);
3854
3855 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3856
3857 trace_binder_wait_for_work(wait_for_proc_work,
3858 !!thread->transaction_stack,
3859 !binder_worklist_empty(proc, &thread->todo));
3860 if (wait_for_proc_work) {
3861 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3862 BINDER_LOOPER_STATE_ENTERED))) {
3863 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3864 proc->pid, thread->pid, thread->looper);
3865 wait_event_interruptible(binder_user_error_wait,
3866 binder_stop_on_user_error < 2);
3867 }
3868 binder_set_nice(proc->default_priority);
3869 }
3870
3871 if (non_block) {
3872 if (!binder_has_work(thread, wait_for_proc_work))
3873 ret = -EAGAIN;
3874 } else {
3875 ret = binder_wait_for_work(thread, wait_for_proc_work);
3876 }
3877
3878 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3879
3880 if (ret)
3881 return ret;
3882
3883 while (1) {
3884 uint32_t cmd;
3885 struct binder_transaction_data tr;
3886 struct binder_work *w = NULL;
3887 struct list_head *list = NULL;
3888 struct binder_transaction *t = NULL;
3889 struct binder_thread *t_from;
3890
3891 binder_inner_proc_lock(proc);
3892 if (!binder_worklist_empty_ilocked(&thread->todo))
3893 list = &thread->todo;
3894 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3895 wait_for_proc_work)
3896 list = &proc->todo;
3897 else {
3898 binder_inner_proc_unlock(proc);
3899
3900 /* no data added */
3901 if (ptr - buffer == 4 && !thread->looper_need_return)
3902 goto retry;
3903 break;
3904 }
3905
3906 if (end - ptr < sizeof(tr) + 4) {
3907 binder_inner_proc_unlock(proc);
3908 break;
3909 }
3910 w = binder_dequeue_work_head_ilocked(list);
3911 if (binder_worklist_empty_ilocked(&thread->todo))
3912 thread->process_todo = false;
3913
3914 switch (w->type) {
3915 case BINDER_WORK_TRANSACTION: {
3916 binder_inner_proc_unlock(proc);
3917 t = container_of(w, struct binder_transaction, work);
3918 } break;
3919 case BINDER_WORK_RETURN_ERROR: {
3920 struct binder_error *e = container_of(
3921 w, struct binder_error, work);
3922
3923 WARN_ON(e->cmd == BR_OK);
3924 binder_inner_proc_unlock(proc);
3925 if (put_user(e->cmd, (uint32_t __user *)ptr))
3926 return -EFAULT;
3927 cmd = e->cmd;
3928 e->cmd = BR_OK;
3929 ptr += sizeof(uint32_t);
3930
3931 binder_stat_br(proc, thread, cmd);
3932 } break;
3933 case BINDER_WORK_TRANSACTION_COMPLETE: {
3934 binder_inner_proc_unlock(proc);
3935 cmd = BR_TRANSACTION_COMPLETE;
3936 if (put_user(cmd, (uint32_t __user *)ptr))
3937 return -EFAULT;
3938 ptr += sizeof(uint32_t);
3939
3940 binder_stat_br(proc, thread, cmd);
3941 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3942 "%d:%d BR_TRANSACTION_COMPLETE\n",
3943 proc->pid, thread->pid);
3944 kfree(w);
3945 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3946 } break;
3947 case BINDER_WORK_NODE: {
3948 struct binder_node *node = container_of(w, struct binder_node, work);
3949 int strong, weak;
3950 binder_uintptr_t node_ptr = node->ptr;
3951 binder_uintptr_t node_cookie = node->cookie;
3952 int node_debug_id = node->debug_id;
3953 int has_weak_ref;
3954 int has_strong_ref;
3955 void __user *orig_ptr = ptr;
3956
3957 BUG_ON(proc != node->proc);
3958 strong = node->internal_strong_refs ||
3959 node->local_strong_refs;
3960 weak = !hlist_empty(&node->refs) ||
3961 node->local_weak_refs ||
3962 node->tmp_refs || strong;
3963 has_strong_ref = node->has_strong_ref;
3964 has_weak_ref = node->has_weak_ref;
3965
3966 if (weak && !has_weak_ref) {
3967 node->has_weak_ref = 1;
3968 node->pending_weak_ref = 1;
3969 node->local_weak_refs++;
3970 }
3971 if (strong && !has_strong_ref) {
3972 node->has_strong_ref = 1;
3973 node->pending_strong_ref = 1;
3974 node->local_strong_refs++;
3975 }
3976 if (!strong && has_strong_ref)
3977 node->has_strong_ref = 0;
3978 if (!weak && has_weak_ref)
3979 node->has_weak_ref = 0;
3980 if (!weak && !strong) {
3981 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3982 "%d:%d node %d u%016llx c%016llx deleted\n",
3983 proc->pid, thread->pid,
3984 node_debug_id,
3985 (u64)node_ptr,
3986 (u64)node_cookie);
3987 rb_erase(&node->rb_node, &proc->nodes);
3988 binder_inner_proc_unlock(proc);
3989 binder_node_lock(node);
3990 /*
3991 * Acquire the node lock before freeing the
3992 * node to serialize with other threads that
3993 * may have been holding the node lock while
3994 * decrementing this node (avoids race where
3995 * this thread frees while the other thread
3996 * is unlocking the node after the final
3997 * decrement)
3998 */
3999 binder_node_unlock(node);
4000 binder_free_node(node);
4001 } else
4002 binder_inner_proc_unlock(proc);
4003
4004 if (weak && !has_weak_ref)
4005 ret = binder_put_node_cmd(
4006 proc, thread, &ptr, node_ptr,
4007 node_cookie, node_debug_id,
4008 BR_INCREFS, "BR_INCREFS");
4009 if (!ret && strong && !has_strong_ref)
4010 ret = binder_put_node_cmd(
4011 proc, thread, &ptr, node_ptr,
4012 node_cookie, node_debug_id,
4013 BR_ACQUIRE, "BR_ACQUIRE");
4014 if (!ret && !strong && has_strong_ref)
4015 ret = binder_put_node_cmd(
4016 proc, thread, &ptr, node_ptr,
4017 node_cookie, node_debug_id,
4018 BR_RELEASE, "BR_RELEASE");
4019 if (!ret && !weak && has_weak_ref)
4020 ret = binder_put_node_cmd(
4021 proc, thread, &ptr, node_ptr,
4022 node_cookie, node_debug_id,
4023 BR_DECREFS, "BR_DECREFS");
4024 if (orig_ptr == ptr)
4025 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4026 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4027 proc->pid, thread->pid,
4028 node_debug_id,
4029 (u64)node_ptr,
4030 (u64)node_cookie);
4031 if (ret)
4032 return ret;
4033 } break;
4034 case BINDER_WORK_DEAD_BINDER:
4035 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4036 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4037 struct binder_ref_death *death;
4038 uint32_t cmd;
4039 binder_uintptr_t cookie;
4040
4041 death = container_of(w, struct binder_ref_death, work);
4042 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4043 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4044 else
4045 cmd = BR_DEAD_BINDER;
4046 cookie = death->cookie;
4047
4048 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4049 "%d:%d %s %016llx\n",
4050 proc->pid, thread->pid,
4051 cmd == BR_DEAD_BINDER ?
4052 "BR_DEAD_BINDER" :
4053 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4054 (u64)cookie);
4055 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4056 binder_inner_proc_unlock(proc);
4057 kfree(death);
4058 binder_stats_deleted(BINDER_STAT_DEATH);
4059 } else {
4060 binder_enqueue_work_ilocked(
4061 w, &proc->delivered_death);
4062 binder_inner_proc_unlock(proc);
4063 }
4064 if (put_user(cmd, (uint32_t __user *)ptr))
4065 return -EFAULT;
4066 ptr += sizeof(uint32_t);
4067 if (put_user(cookie,
4068 (binder_uintptr_t __user *)ptr))
4069 return -EFAULT;
4070 ptr += sizeof(binder_uintptr_t);
4071 binder_stat_br(proc, thread, cmd);
4072 if (cmd == BR_DEAD_BINDER)
4073 goto done;
4074 } break;
4075 }
4076
4077 if (!t)
4078 continue;
4079
4080 BUG_ON(t->buffer == NULL);
4081 if (t->buffer->target_node) {
4082 struct binder_node *target_node = t->buffer->target_node;
4083
4084 tr.target.ptr = target_node->ptr;
4085 tr.cookie = target_node->cookie;
4086 t->saved_priority = task_nice(current);
4087 if (t->priority < target_node->min_priority &&
4088 !(t->flags & TF_ONE_WAY))
4089 binder_set_nice(t->priority);
4090 else if (!(t->flags & TF_ONE_WAY) ||
4091 t->saved_priority > target_node->min_priority)
4092 binder_set_nice(target_node->min_priority);
4093 cmd = BR_TRANSACTION;
4094 } else {
4095 tr.target.ptr = 0;
4096 tr.cookie = 0;
4097 cmd = BR_REPLY;
4098 }
4099 tr.code = t->code;
4100 tr.flags = t->flags;
4101 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4102
4103 t_from = binder_get_txn_from(t);
4104 if (t_from) {
4105 struct task_struct *sender = t_from->proc->tsk;
4106
4107 tr.sender_pid = task_tgid_nr_ns(sender,
4108 task_active_pid_ns(current));
4109 } else {
4110 tr.sender_pid = 0;
4111 }
4112
4113 tr.data_size = t->buffer->data_size;
4114 tr.offsets_size = t->buffer->offsets_size;
4115 tr.data.ptr.buffer = (binder_uintptr_t)
4116 ((uintptr_t)t->buffer->data +
4117 binder_alloc_get_user_buffer_offset(&proc->alloc));
4118 tr.data.ptr.offsets = tr.data.ptr.buffer +
4119 ALIGN(t->buffer->data_size,
4120 sizeof(void *));
4121
4122 if (put_user(cmd, (uint32_t __user *)ptr)) {
4123 if (t_from)
4124 binder_thread_dec_tmpref(t_from);
4125
4126 binder_cleanup_transaction(t, "put_user failed",
4127 BR_FAILED_REPLY);
4128
4129 return -EFAULT;
4130 }
4131 ptr += sizeof(uint32_t);
4132 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4133 if (t_from)
4134 binder_thread_dec_tmpref(t_from);
4135
4136 binder_cleanup_transaction(t, "copy_to_user failed",
4137 BR_FAILED_REPLY);
4138
4139 return -EFAULT;
4140 }
4141 ptr += sizeof(tr);
4142
4143 trace_binder_transaction_received(t);
4144 binder_stat_br(proc, thread, cmd);
4145 binder_debug(BINDER_DEBUG_TRANSACTION,
4146 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4147 proc->pid, thread->pid,
4148 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4149 "BR_REPLY",
4150 t->debug_id, t_from ? t_from->proc->pid : 0,
4151 t_from ? t_from->pid : 0, cmd,
4152 t->buffer->data_size, t->buffer->offsets_size,
4153 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4154
4155 if (t_from)
4156 binder_thread_dec_tmpref(t_from);
4157 t->buffer->allow_user_free = 1;
4158 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4159 binder_inner_proc_lock(thread->proc);
4160 t->to_parent = thread->transaction_stack;
4161 t->to_thread = thread;
4162 thread->transaction_stack = t;
4163 binder_inner_proc_unlock(thread->proc);
4164 } else {
4165 binder_free_transaction(t);
4166 }
4167 break;
4168 }
4169
4170done:
4171
4172 *consumed = ptr - buffer;
4173 binder_inner_proc_lock(proc);
4174 if (proc->requested_threads == 0 &&
4175 list_empty(&thread->proc->waiting_threads) &&
4176 proc->requested_threads_started < proc->max_threads &&
4177 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4178 BINDER_LOOPER_STATE_ENTERED))
4179 ) {
4180 proc->requested_threads++;
4181 binder_inner_proc_unlock(proc);
4182 binder_debug(BINDER_DEBUG_THREADS,
4183 "%d:%d BR_SPAWN_LOOPER\n",
4184 proc->pid, thread->pid);
4185 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4186 return -EFAULT;
4187 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4188 } else
4189 binder_inner_proc_unlock(proc);
4190 return 0;
4191}
4192
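/*
 * binder_release_work() - drain a work list during teardown: undelivered
 * transactions get a dead reply, other work items are logged and freed.
 */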
4193static void binder_release_work(struct binder_proc *proc,
4194 struct list_head *list)
4195{
4196 struct binder_work *w;
4197
4198 while (1) {
4199 w = binder_dequeue_work_head(proc, list);
4200 if (!w)
4201 return;
4202
4203 switch (w->type) {
4204 case BINDER_WORK_TRANSACTION: {
4205 struct binder_transaction *t;
4206
4207 t = container_of(w, struct binder_transaction, work);
4208
4209 binder_cleanup_transaction(t, "process died.",
4210 BR_DEAD_REPLY);
4211 } break;
4212 case BINDER_WORK_RETURN_ERROR: {
4213 struct binder_error *e = container_of(
4214 w, struct binder_error, work);
4215
4216 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4217 "undelivered TRANSACTION_ERROR: %u\n",
4218 e->cmd);
4219 } break;
4220 case BINDER_WORK_TRANSACTION_COMPLETE: {
4221 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4222 "undelivered TRANSACTION_COMPLETE\n");
4223 kfree(w);
4224 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4225 } break;
4226 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4227 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4228 struct binder_ref_death *death;
4229
4230 death = container_of(w, struct binder_ref_death, work);
4231 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4232 "undelivered death notification, %016llx\n",
4233 (u64)death->cookie);
4234 kfree(death);
4235 binder_stats_deleted(BINDER_STAT_DEATH);
4236 } break;
4237 default:
4238 pr_err("unexpected work type, %d, not freed\n",
4239 w->type);
4240 break;
4241 }
4242 }
4244}
4245
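/*
 * binder_get_thread_ilocked() - look up the binder_thread for the
 * current task in proc->threads; if it is missing and @new_thread was
 * supplied, initialize and insert @new_thread instead. The caller must
 * hold the proc inner lock.
 */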
4246static struct binder_thread *binder_get_thread_ilocked(
4247 struct binder_proc *proc, struct binder_thread *new_thread)
4248{
4249 struct binder_thread *thread = NULL;
4250 struct rb_node *parent = NULL;
4251 struct rb_node **p = &proc->threads.rb_node;
4252
4253 while (*p) {
4254 parent = *p;
4255 thread = rb_entry(parent, struct binder_thread, rb_node);
4256
4257 if (current->pid < thread->pid)
4258 p = &(*p)->rb_left;
4259 else if (current->pid > thread->pid)
4260 p = &(*p)->rb_right;
4261 else
4262 return thread;
4263 }
4264 if (!new_thread)
4265 return NULL;
4266 thread = new_thread;
4267 binder_stats_created(BINDER_STAT_THREAD);
4268 thread->proc = proc;
4269 thread->pid = current->pid;
4270 atomic_set(&thread->tmp_ref, 0);
4271 init_waitqueue_head(&thread->wait);
4272 INIT_LIST_HEAD(&thread->todo);
4273 rb_link_node(&thread->rb_node, parent, p);
4274 rb_insert_color(&thread->rb_node, &proc->threads);
4275 thread->looper_need_return = true;
4276 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4277 thread->return_error.cmd = BR_OK;
4278 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4279 thread->reply_error.cmd = BR_OK;
4280 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4281 return thread;
4282}
4283
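/*
 * binder_get_thread() - find or create the binder_thread for the calling
 * task. The allocation is done outside the inner lock; if another thread
 * won the race to insert an entry, the spare allocation is freed.
 */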
4284static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4285{
4286 struct binder_thread *thread;
4287 struct binder_thread *new_thread;
4288
4289 binder_inner_proc_lock(proc);
4290 thread = binder_get_thread_ilocked(proc, NULL);
4291 binder_inner_proc_unlock(proc);
4292 if (!thread) {
4293 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4294 if (new_thread == NULL)
4295 return NULL;
4296 binder_inner_proc_lock(proc);
4297 thread = binder_get_thread_ilocked(proc, new_thread);
4298 binder_inner_proc_unlock(proc);
4299 if (thread != new_thread)
4300 kfree(new_thread);
4301 }
4302 return thread;
4303}
4304
4305static void binder_free_proc(struct binder_proc *proc)
4306{
4307 BUG_ON(!list_empty(&proc->todo));
4308 BUG_ON(!list_empty(&proc->delivered_death));
4309 binder_alloc_deferred_release(&proc->alloc);
4310 put_task_struct(proc->tsk);
4311 binder_stats_deleted(BINDER_STAT_PROC);
4312 kfree(proc);
4313}
4314
4315static void binder_free_thread(struct binder_thread *thread)
4316{
4317 BUG_ON(!list_empty(&thread->todo));
4318 binder_stats_deleted(BINDER_STAT_THREAD);
4319 binder_proc_dec_tmpref(thread->proc);
4320 kfree(thread);
4321}
4322
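/*
 * binder_thread_release() - mark a thread dead and unwind its
 * transaction stack, failing any reply owed to a peer. Returns the
 * number of transactions that were still active.
 */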
4323static int binder_thread_release(struct binder_proc *proc,
4324 struct binder_thread *thread)
4325{
4326 struct binder_transaction *t;
4327 struct binder_transaction *send_reply = NULL;
4328 int active_transactions = 0;
4329 struct binder_transaction *last_t = NULL;
4330
4331 binder_inner_proc_lock(thread->proc);
4332 /*
4333 * take a ref on the proc so it survives
4334 * after we remove this thread from proc->threads.
4335 * The corresponding dec is when we actually
4336 * free the thread in binder_free_thread()
4337 */
4338 proc->tmp_ref++;
4339
4340 /*
4341 * take a ref on this thread to ensure it survives while we are releasing it
4342 */
4343 atomic_inc(&thread->tmp_ref);
4344 rb_erase(&thread->rb_node, &proc->threads);
4345 t = thread->transaction_stack;
4346 if (t) {
4347 spin_lock(&t->lock);
4348 if (t->to_thread == thread)
4349 send_reply = t;
4350 }
4351 thread->is_dead = true;
4352
4353 while (t) {
4354 last_t = t;
4355 active_transactions++;
4356 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4357 "release %d:%d transaction %d %s, still active\n",
4358 proc->pid, thread->pid,
4359 t->debug_id,
4360 (t->to_thread == thread) ? "in" : "out");
4361
4362 if (t->to_thread == thread) {
4363 t->to_proc = NULL;
4364 t->to_thread = NULL;
4365 if (t->buffer) {
4366 t->buffer->transaction = NULL;
4367 t->buffer = NULL;
4368 }
4369 t = t->to_parent;
4370 } else if (t->from == thread) {
4371 t->from = NULL;
4372 t = t->from_parent;
4373 } else
4374 BUG();
4375 spin_unlock(&last_t->lock);
4376 if (t)
4377 spin_lock(&t->lock);
4378 }
4379
4380 /*
4381 * If this thread used poll, make sure we remove the waitqueue
4382 * from any epoll data structures holding it with POLLFREE.
4383 * waitqueue_active() is safe to use here because we're holding
4384 * the inner lock.
4385 */
4386 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4387 waitqueue_active(&thread->wait)) {
4388 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4389 }
4390
4391 binder_inner_proc_unlock(thread->proc);
4392
4393 /*
4394 * This is needed to avoid races between wake_up_poll() above and
4395 * ep_remove_wait_queue() called for other reasons (eg the epoll file
4396 * descriptor being closed); ep_remove_wait_queue() holds an RCU read
4397 * lock, so we can be sure it's done after calling synchronize_rcu().
4398 */
4399 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4400 synchronize_rcu();
4401
4402 if (send_reply)
4403 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4404 binder_release_work(proc, &thread->todo);
4405 binder_thread_dec_tmpref(thread);
4406 return active_transactions;
4407}
4408
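/*
 * binder_poll() - poll/epoll support; reports EPOLLIN when the calling
 * thread has (or may pick up) pending binder work.
 */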
4409static __poll_t binder_poll(struct file *filp,
4410 struct poll_table_struct *wait)
4411{
4412 struct binder_proc *proc = filp->private_data;
4413 struct binder_thread *thread = NULL;
4414 bool wait_for_proc_work;
4415
4416 thread = binder_get_thread(proc);
4417 if (!thread)
4418 return EPOLLERR;
4419
4420 binder_inner_proc_lock(thread->proc);
4421 thread->looper |= BINDER_LOOPER_STATE_POLL;
4422 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4423
4424 binder_inner_proc_unlock(thread->proc);
4425
4426 poll_wait(filp, &thread->wait, wait);
4427
4428 if (binder_has_work(thread, wait_for_proc_work))
4429 return EPOLLIN;
4430
4431 return 0;
4432}
4433
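/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ: copy in the
 * binder_write_read descriptor, process the write half and then the
 * read half, and copy the updated consumed counters back to userspace.
 */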
4434static int binder_ioctl_write_read(struct file *filp,
4435 unsigned int cmd, unsigned long arg,
4436 struct binder_thread *thread)
4437{
4438 int ret = 0;
4439 struct binder_proc *proc = filp->private_data;
4440 unsigned int size = _IOC_SIZE(cmd);
4441 void __user *ubuf = (void __user *)arg;
4442 struct binder_write_read bwr;
4443
4444 if (size != sizeof(struct binder_write_read)) {
4445 ret = -EINVAL;
4446 goto out;
4447 }
4448 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4449 ret = -EFAULT;
4450 goto out;
4451 }
4452 binder_debug(BINDER_DEBUG_READ_WRITE,
4453 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4454 proc->pid, thread->pid,
4455 (u64)bwr.write_size, (u64)bwr.write_buffer,
4456 (u64)bwr.read_size, (u64)bwr.read_buffer);
4457
4458 if (bwr.write_size > 0) {
4459 ret = binder_thread_write(proc, thread,
4460 bwr.write_buffer,
4461 bwr.write_size,
4462 &bwr.write_consumed);
4463 trace_binder_write_done(ret);
4464 if (ret < 0) {
4465 bwr.read_consumed = 0;
4466 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4467 ret = -EFAULT;
4468 goto out;
4469 }
4470 }
4471 if (bwr.read_size > 0) {
4472 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4473 bwr.read_size,
4474 &bwr.read_consumed,
4475 filp->f_flags & O_NONBLOCK);
4476 trace_binder_read_done(ret);
4477 binder_inner_proc_lock(proc);
4478 if (!binder_worklist_empty_ilocked(&proc->todo))
4479 binder_wakeup_proc_ilocked(proc);
4480 binder_inner_proc_unlock(proc);
4481 if (ret < 0) {
4482 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4483 ret = -EFAULT;
4484 goto out;
4485 }
4486 }
4487 binder_debug(BINDER_DEBUG_READ_WRITE,
4488 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4489 proc->pid, thread->pid,
4490 (u64)bwr.write_consumed, (u64)bwr.write_size,
4491 (u64)bwr.read_consumed, (u64)bwr.read_size);
4492 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4493 ret = -EFAULT;
4494 goto out;
4495 }
4496out:
4497 return ret;
4498}
4499
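/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR: after the
 * security hook and euid checks, create the context manager node that
 * handle 0 resolves to. Fails with -EBUSY if one is already registered.
 */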
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

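/*
 * Return debug info for the first node whose ptr is strictly greater
 * than info->ptr, so userspace can walk all nodes of a process by
 * feeding the returned ptr back in.
 */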
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

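	/*
	 * If stop_on_user_error is 2 or more, block here until it is
	 * lowered again; this is a debugging aid that freezes binder
	 * traffic after a userspace error has been detected.
	 */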
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
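	/*
	 * The buffer is mapped read-only into userspace; clearing
	 * VM_MAYWRITE also keeps mprotect() from making it writable
	 * later.
	 */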
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
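		/*
		 * The debugfs entry is named after the PID and shared
		 * between binder contexts, so creation can fail if this
		 * process has already opened another binder device. That
		 * is harmless: the proc dump prints all contexts for a
		 * given PID anyway.
		 */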
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

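/*
 * Deferred flush handler: mark every thread of the process as needing
 * to return to userspace and wake the ones sleeping in the driver, so
 * they notice looper_need_return and come back out of the kernel.
 */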
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
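	/*
	 * The caller must hold a temporary reference on this node, so
	 * tmp_refs is at least 1 here.
	 */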
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
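		/*
		 * Need the node lock to synchronize with new notification
		 * requests and the inner proc lock to synchronize with
		 * queued death notifications.
		 */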
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
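	/*
	 * Take a temporary reference on the proc so it stays alive
	 * after all of its threads have been released below.
	 */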
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
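		/*
		 * Take a temporary reference on the node before dropping
		 * the inner lock: binder_node_release() will either free
		 * the node or drop this reference for us.
		 */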
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

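/*
 * Workqueue handler: drain binder_deferred_list, handling each proc's
 * pending deferred flags (put files, flush, release) until the list
 * is empty.
 */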
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc);

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
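		/*
		 * It is only safe to dereference the buffer while holding
		 * the inner lock of the proc that owns it, so stop here.
		 */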
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

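/*
 * Print one thread. If print_always is false and nothing was emitted
 * beyond the header line, rewind the seq_file so idle threads are
 * omitted from the output.
 */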
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
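		/*
		 * Take a temporary reference on the node so it survives
		 * while the proc lock is dropped to print it.
		 */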
		binder_inc_node_tmpref_ilocked(node);

		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
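		/*
		 * Take a temporary reference on the node so it survives
		 * while binder_dead_nodes_lock is dropped to print it.
		 */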
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
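	/*
	 * Read barrier: read debug_id_done before the entry fields, so
	 * we can tell below whether the entry was overwritten while we
	 * were printing it.
	 */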
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
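	/*
	 * Read barrier: make sure the fields above were read before
	 * re-checking debug_id_done; a mismatch means the entry was
	 * overwritten concurrently and is reported as incomplete.
	 */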
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

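/*
 * The transaction log is a lock-free ring buffer: figure out the oldest
 * valid entry and print up to ARRAY_SIZE(log->entry) entries in order.
 */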
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

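	/*
	 * Copy the module parameter string: strsep() below modifies the
	 * string it tokenizes, and the parameter must stay intact.
	 */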
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");