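/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
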
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset()
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

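/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */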
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

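/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */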
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

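/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 */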
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

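/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from one binder proc to another.
 * This struct is unsafe to access without holding @proc->outer_lock.
 */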
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

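/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes.
 */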
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

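/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */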
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

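/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */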
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

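/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */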
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

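/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */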
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

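/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */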
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

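/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */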
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

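/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */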
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

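/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */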
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

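/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */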
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

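/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */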
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

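/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */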
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

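/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */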
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

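/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */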
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

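/*
 * copied from fd_install
 */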
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

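/*
 * copied from sys_close
 */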
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

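/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function while holding proc->inner_lock is
 * required to avoid racing with either binder_select_thread() or
 * binder_free_thread().
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */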
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

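/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */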
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

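/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */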
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

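/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */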
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

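/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */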
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

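/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */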
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

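/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */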
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

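/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */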
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

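/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */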
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

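/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */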
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

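/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */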
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

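/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */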
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

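/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */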
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

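/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */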
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

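/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */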
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

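/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */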
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

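/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */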
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header there */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

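/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid.
 */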
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

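/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			binder_buffer containing the object
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in @buffer
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */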
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* No fixup done so far; nothing to check against */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Fixups are only allowed in @buffer itself or in one
		 * of its ancestors, so walk up the parent chain of
		 * @last_obj until we either reach @buffer or run out
		 * of parents.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get freed with the
			 * transaction buffer
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     " node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

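/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the thread and that specific thread is woken up.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */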
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct list_head *target_list = NULL;
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool wakeup = true;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			target_list = &node->async_todo;
			wakeup = false;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !target_list)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		target_list = &thread->todo;
	else if (!target_list)
		target_list = &proc->todo;
	else
		BUG_ON(target_list != &node->async_todo);

	binder_enqueue_work_ilocked(&t->work, target_list);

	if (wakeup)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

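/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:         struct binder_node for which to get refs
 * @procp:        returns @node->proc if valid
 * @error:        if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
 *         Also sets @procp if valid. If the @node->proc is NULL indicating
 *         that the corresponding binder_node is dead, we can't use it for
 *         the transaction and BR_DEAD_REPLY is returned.
 */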
2606static struct binder_node *binder_get_node_refs_for_txn(
2607 struct binder_node *node,
2608 struct binder_proc **procp,
2609 uint32_t *error)
2610{
2611 struct binder_node *target_node = NULL;
2612
2613 binder_node_inner_lock(node);
2614 if (node->proc) {
2615 target_node = node;
2616 binder_inc_node_nilocked(node, 1, 0, NULL);
2617 binder_inc_node_tmpref_ilocked(node);
2618 node->proc->tmp_ref++;
2619 *procp = node->proc;
2620 } else
2621 *error = BR_DEAD_REPLY;
2622 binder_node_inner_unlock(node);
2623
2624 return target_node;
2625}
2626
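/*
 * binder_transaction() - translate and queue a transaction or reply
 *
 * Core transmit path: resolves the target node/process/thread, copies
 * the transaction data and offsets from the sender, translates every
 * embedded object (binder nodes, handles, fds, fd arrays and
 * scatter-gather buffers) into the target's context, and queues the
 * result on the appropriate todo list. For replies, the matching
 * transaction is popped off the caller's stack and routed back to the
 * original sender. On failure, a BR_* error is queued to the sender.
 */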
2627static void binder_transaction(struct binder_proc *proc,
2628 struct binder_thread *thread,
2629 struct binder_transaction_data *tr, int reply,
2630 binder_size_t extra_buffers_size)
2631{
2632 int ret;
2633 struct binder_transaction *t;
2634 struct binder_work *tcomplete;
2635 binder_size_t *offp, *off_end, *off_start;
2636 binder_size_t off_min;
2637 u8 *sg_bufp, *sg_buf_end;
2638 struct binder_proc *target_proc = NULL;
2639 struct binder_thread *target_thread = NULL;
2640 struct binder_node *target_node = NULL;
2641 struct binder_transaction *in_reply_to = NULL;
2642 struct binder_transaction_log_entry *e;
2643 uint32_t return_error = 0;
2644 uint32_t return_error_param = 0;
2645 uint32_t return_error_line = 0;
2646 struct binder_buffer_object *last_fixup_obj = NULL;
2647 binder_size_t last_fixup_min_off = 0;
2648 struct binder_context *context = proc->context;
2649 int t_debug_id = atomic_inc_return(&binder_last_id);
2650
2651 e = binder_transaction_log_add(&binder_transaction_log);
2652 e->debug_id = t_debug_id;
2653 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2654 e->from_proc = proc->pid;
2655 e->from_thread = thread->pid;
2656 e->target_handle = tr->target.handle;
2657 e->data_size = tr->data_size;
2658 e->offsets_size = tr->offsets_size;
2659 e->context_name = proc->context->name;
2660
2661 if (reply) {
2662 binder_inner_proc_lock(proc);
2663 in_reply_to = thread->transaction_stack;
2664 if (in_reply_to == NULL) {
2665 binder_inner_proc_unlock(proc);
2666 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2667 proc->pid, thread->pid);
2668 return_error = BR_FAILED_REPLY;
2669 return_error_param = -EPROTO;
2670 return_error_line = __LINE__;
2671 goto err_empty_call_stack;
2672 }
2673 if (in_reply_to->to_thread != thread) {
2674 spin_lock(&in_reply_to->lock);
2675 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2676 proc->pid, thread->pid, in_reply_to->debug_id,
2677 in_reply_to->to_proc ?
2678 in_reply_to->to_proc->pid : 0,
2679 in_reply_to->to_thread ?
2680 in_reply_to->to_thread->pid : 0);
2681 spin_unlock(&in_reply_to->lock);
2682 binder_inner_proc_unlock(proc);
2683 return_error = BR_FAILED_REPLY;
2684 return_error_param = -EPROTO;
2685 return_error_line = __LINE__;
2686 in_reply_to = NULL;
2687 goto err_bad_call_stack;
2688 }
2689 thread->transaction_stack = in_reply_to->to_parent;
2690 binder_inner_proc_unlock(proc);
2691 binder_set_nice(in_reply_to->saved_priority);
2692 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2693 if (target_thread == NULL) {
2694 return_error = BR_DEAD_REPLY;
2695 return_error_line = __LINE__;
2696 goto err_dead_binder;
2697 }
2698 if (target_thread->transaction_stack != in_reply_to) {
2699 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2700 proc->pid, thread->pid,
2701 target_thread->transaction_stack ?
2702 target_thread->transaction_stack->debug_id : 0,
2703 in_reply_to->debug_id);
2704 binder_inner_proc_unlock(target_thread->proc);
2705 return_error = BR_FAILED_REPLY;
2706 return_error_param = -EPROTO;
2707 return_error_line = __LINE__;
2708 in_reply_to = NULL;
2709 target_thread = NULL;
2710 goto err_dead_binder;
2711 }
2712 target_proc = target_thread->proc;
2713 target_proc->tmp_ref++;
2714 binder_inner_proc_unlock(target_thread->proc);
2715 } else {
2716 if (tr->target.handle) {
2717 struct binder_ref *ref;
2718
			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
2726 binder_proc_lock(proc);
2727 ref = binder_get_ref_olocked(proc, tr->target.handle,
2728 true);
2729 if (ref) {
2730 target_node = binder_get_node_refs_for_txn(
2731 ref->node, &target_proc,
2732 &return_error);
2733 } else {
2734 binder_user_error("%d:%d got transaction to invalid handle\n",
2735 proc->pid, thread->pid);
2736 return_error = BR_FAILED_REPLY;
2737 }
2738 binder_proc_unlock(proc);
2739 } else {
2740 mutex_lock(&context->context_mgr_node_lock);
2741 target_node = context->binder_context_mgr_node;
2742 if (target_node)
2743 target_node = binder_get_node_refs_for_txn(
2744 target_node, &target_proc,
2745 &return_error);
2746 else
2747 return_error = BR_DEAD_REPLY;
2748 mutex_unlock(&context->context_mgr_node_lock);
2749 }
2750 if (!target_node) {
			/*
			 * return_error is set above
			 */
2754 return_error_param = -EINVAL;
2755 return_error_line = __LINE__;
2756 goto err_dead_binder;
2757 }
2758 e->to_node = target_node->debug_id;
2759 if (security_binder_transaction(proc->tsk,
2760 target_proc->tsk) < 0) {
2761 return_error = BR_FAILED_REPLY;
2762 return_error_param = -EPERM;
2763 return_error_line = __LINE__;
2764 goto err_invalid_target_handle;
2765 }
2766 binder_inner_proc_lock(proc);
2767 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2768 struct binder_transaction *tmp;
2769
2770 tmp = thread->transaction_stack;
2771 if (tmp->to_thread != thread) {
2772 spin_lock(&tmp->lock);
2773 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2774 proc->pid, thread->pid, tmp->debug_id,
2775 tmp->to_proc ? tmp->to_proc->pid : 0,
2776 tmp->to_thread ?
2777 tmp->to_thread->pid : 0);
2778 spin_unlock(&tmp->lock);
2779 binder_inner_proc_unlock(proc);
2780 return_error = BR_FAILED_REPLY;
2781 return_error_param = -EPROTO;
2782 return_error_line = __LINE__;
2783 goto err_bad_call_stack;
2784 }
2785 while (tmp) {
2786 struct binder_thread *from;
2787
2788 spin_lock(&tmp->lock);
2789 from = tmp->from;
2790 if (from && from->proc == target_proc) {
2791 atomic_inc(&from->tmp_ref);
2792 target_thread = from;
2793 spin_unlock(&tmp->lock);
2794 break;
2795 }
2796 spin_unlock(&tmp->lock);
2797 tmp = tmp->from_parent;
2798 }
2799 }
2800 binder_inner_proc_unlock(proc);
2801 }
2802 if (target_thread)
2803 e->to_thread = target_thread->pid;
2804 e->to_proc = target_proc->pid;
2805
	/* TODO: reuse incoming transaction for reply */
2807 t = kzalloc(sizeof(*t), GFP_KERNEL);
2808 if (t == NULL) {
2809 return_error = BR_FAILED_REPLY;
2810 return_error_param = -ENOMEM;
2811 return_error_line = __LINE__;
2812 goto err_alloc_t_failed;
2813 }
2814 binder_stats_created(BINDER_STAT_TRANSACTION);
2815 spin_lock_init(&t->lock);
2816
2817 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2818 if (tcomplete == NULL) {
2819 return_error = BR_FAILED_REPLY;
2820 return_error_param = -ENOMEM;
2821 return_error_line = __LINE__;
2822 goto err_alloc_tcomplete_failed;
2823 }
2824 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2825
2826 t->debug_id = t_debug_id;
2827
2828 if (reply)
2829 binder_debug(BINDER_DEBUG_TRANSACTION,
2830 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2831 proc->pid, thread->pid, t->debug_id,
2832 target_proc->pid, target_thread->pid,
2833 (u64)tr->data.ptr.buffer,
2834 (u64)tr->data.ptr.offsets,
2835 (u64)tr->data_size, (u64)tr->offsets_size,
2836 (u64)extra_buffers_size);
2837 else
2838 binder_debug(BINDER_DEBUG_TRANSACTION,
2839 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2840 proc->pid, thread->pid, t->debug_id,
2841 target_proc->pid, target_node->debug_id,
2842 (u64)tr->data.ptr.buffer,
2843 (u64)tr->data.ptr.offsets,
2844 (u64)tr->data_size, (u64)tr->offsets_size,
2845 (u64)extra_buffers_size);
2846
2847 if (!reply && !(tr->flags & TF_ONE_WAY))
2848 t->from = thread;
2849 else
2850 t->from = NULL;
2851 t->sender_euid = task_euid(proc->tsk);
2852 t->to_proc = target_proc;
2853 t->to_thread = target_thread;
2854 t->code = tr->code;
2855 t->flags = tr->flags;
2856 t->priority = task_nice(current);
2857
2858 trace_binder_transaction(reply, t, target_node);
2859
2860 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2861 tr->offsets_size, extra_buffers_size,
2862 !reply && (t->flags & TF_ONE_WAY));
2863 if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
2867 return_error_param = PTR_ERR(t->buffer);
2868 return_error = return_error_param == -ESRCH ?
2869 BR_DEAD_REPLY : BR_FAILED_REPLY;
2870 return_error_line = __LINE__;
2871 t->buffer = NULL;
2872 goto err_binder_alloc_buf_failed;
2873 }
2874 t->buffer->allow_user_free = 0;
2875 t->buffer->debug_id = t->debug_id;
2876 t->buffer->transaction = t;
2877 t->buffer->target_node = target_node;
2878 trace_binder_transaction_alloc_buf(t->buffer);
2879 off_start = (binder_size_t *)(t->buffer->data +
2880 ALIGN(tr->data_size, sizeof(void *)));
2881 offp = off_start;
2882
2883 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2884 tr->data.ptr.buffer, tr->data_size)) {
2885 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2886 proc->pid, thread->pid);
2887 return_error = BR_FAILED_REPLY;
2888 return_error_param = -EFAULT;
2889 return_error_line = __LINE__;
2890 goto err_copy_data_failed;
2891 }
2892 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2893 tr->data.ptr.offsets, tr->offsets_size)) {
2894 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2895 proc->pid, thread->pid);
2896 return_error = BR_FAILED_REPLY;
2897 return_error_param = -EFAULT;
2898 return_error_line = __LINE__;
2899 goto err_copy_data_failed;
2900 }
2901 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2902 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2903 proc->pid, thread->pid, (u64)tr->offsets_size);
2904 return_error = BR_FAILED_REPLY;
2905 return_error_param = -EINVAL;
2906 return_error_line = __LINE__;
2907 goto err_bad_offset;
2908 }
2909 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2910 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2911 proc->pid, thread->pid,
2912 (u64)extra_buffers_size);
2913 return_error = BR_FAILED_REPLY;
2914 return_error_param = -EINVAL;
2915 return_error_line = __LINE__;
2916 goto err_bad_offset;
2917 }
2918 off_end = (void *)off_start + tr->offsets_size;
2919 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2920 sg_buf_end = sg_bufp + extra_buffers_size;
2921 off_min = 0;
2922 for (; offp < off_end; offp++) {
2923 struct binder_object_header *hdr;
2924 size_t object_size = binder_validate_object(t->buffer, *offp);
2925
2926 if (object_size == 0 || *offp < off_min) {
2927 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2928 proc->pid, thread->pid, (u64)*offp,
2929 (u64)off_min,
2930 (u64)t->buffer->data_size);
2931 return_error = BR_FAILED_REPLY;
2932 return_error_param = -EINVAL;
2933 return_error_line = __LINE__;
2934 goto err_bad_offset;
2935 }
2936
2937 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2938 off_min = *offp + object_size;
2939 switch (hdr->type) {
2940 case BINDER_TYPE_BINDER:
2941 case BINDER_TYPE_WEAK_BINDER: {
2942 struct flat_binder_object *fp;
2943
2944 fp = to_flat_binder_object(hdr);
2945 ret = binder_translate_binder(fp, t, thread);
2946 if (ret < 0) {
2947 return_error = BR_FAILED_REPLY;
2948 return_error_param = ret;
2949 return_error_line = __LINE__;
2950 goto err_translate_failed;
2951 }
2952 } break;
2953 case BINDER_TYPE_HANDLE:
2954 case BINDER_TYPE_WEAK_HANDLE: {
2955 struct flat_binder_object *fp;
2956
2957 fp = to_flat_binder_object(hdr);
2958 ret = binder_translate_handle(fp, t, thread);
2959 if (ret < 0) {
2960 return_error = BR_FAILED_REPLY;
2961 return_error_param = ret;
2962 return_error_line = __LINE__;
2963 goto err_translate_failed;
2964 }
2965 } break;
2966
2967 case BINDER_TYPE_FD: {
2968 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2969 int target_fd = binder_translate_fd(fp->fd, t, thread,
2970 in_reply_to);
2971
2972 if (target_fd < 0) {
2973 return_error = BR_FAILED_REPLY;
2974 return_error_param = target_fd;
2975 return_error_line = __LINE__;
2976 goto err_translate_failed;
2977 }
2978 fp->pad_binder = 0;
2979 fp->fd = target_fd;
2980 } break;
2981 case BINDER_TYPE_FDA: {
2982 struct binder_fd_array_object *fda =
2983 to_binder_fd_array_object(hdr);
2984 struct binder_buffer_object *parent =
2985 binder_validate_ptr(t->buffer, fda->parent,
2986 off_start,
2987 offp - off_start);
2988 if (!parent) {
2989 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2990 proc->pid, thread->pid);
2991 return_error = BR_FAILED_REPLY;
2992 return_error_param = -EINVAL;
2993 return_error_line = __LINE__;
2994 goto err_bad_parent;
2995 }
2996 if (!binder_validate_fixup(t->buffer, off_start,
2997 parent, fda->parent_offset,
2998 last_fixup_obj,
2999 last_fixup_min_off)) {
3000 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3001 proc->pid, thread->pid);
3002 return_error = BR_FAILED_REPLY;
3003 return_error_param = -EINVAL;
3004 return_error_line = __LINE__;
3005 goto err_bad_parent;
3006 }
3007 ret = binder_translate_fd_array(fda, parent, t, thread,
3008 in_reply_to);
3009 if (ret < 0) {
3010 return_error = BR_FAILED_REPLY;
3011 return_error_param = ret;
3012 return_error_line = __LINE__;
3013 goto err_translate_failed;
3014 }
3015 last_fixup_obj = parent;
3016 last_fixup_min_off =
3017 fda->parent_offset + sizeof(u32) * fda->num_fds;
3018 } break;
3019 case BINDER_TYPE_PTR: {
3020 struct binder_buffer_object *bp =
3021 to_binder_buffer_object(hdr);
3022 size_t buf_left = sg_buf_end - sg_bufp;
3023
3024 if (bp->length > buf_left) {
3025 binder_user_error("%d:%d got transaction with too large buffer\n",
3026 proc->pid, thread->pid);
3027 return_error = BR_FAILED_REPLY;
3028 return_error_param = -EINVAL;
3029 return_error_line = __LINE__;
3030 goto err_bad_offset;
3031 }
3032 if (copy_from_user(sg_bufp,
3033 (const void __user *)(uintptr_t)
3034 bp->buffer, bp->length)) {
3035 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3036 proc->pid, thread->pid);
3037 return_error_param = -EFAULT;
3038 return_error = BR_FAILED_REPLY;
3039 return_error_line = __LINE__;
3040 goto err_copy_data_failed;
3041 }
			/* Fixup buffer pointer to target proc address space */
3043 bp->buffer = (uintptr_t)sg_bufp +
3044 binder_alloc_get_user_buffer_offset(
3045 &target_proc->alloc);
3046 sg_bufp += ALIGN(bp->length, sizeof(u64));
3047
3048 ret = binder_fixup_parent(t, thread, bp, off_start,
3049 offp - off_start,
3050 last_fixup_obj,
3051 last_fixup_min_off);
3052 if (ret < 0) {
3053 return_error = BR_FAILED_REPLY;
3054 return_error_param = ret;
3055 return_error_line = __LINE__;
3056 goto err_translate_failed;
3057 }
3058 last_fixup_obj = bp;
3059 last_fixup_min_off = 0;
3060 } break;
3061 default:
3062 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3063 proc->pid, thread->pid, hdr->type);
3064 return_error = BR_FAILED_REPLY;
3065 return_error_param = -EINVAL;
3066 return_error_line = __LINE__;
3067 goto err_bad_object_type;
3068 }
3069 }
3070 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3071 binder_enqueue_work(proc, tcomplete, &thread->todo);
3072 t->work.type = BINDER_WORK_TRANSACTION;
3073
3074 if (reply) {
3075 binder_inner_proc_lock(target_proc);
3076 if (target_thread->is_dead) {
3077 binder_inner_proc_unlock(target_proc);
3078 goto err_dead_proc_or_thread;
3079 }
3080 BUG_ON(t->buffer->async_transaction != 0);
3081 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3082 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3083 binder_inner_proc_unlock(target_proc);
3084 wake_up_interruptible_sync(&target_thread->wait);
3085 binder_free_transaction(in_reply_to);
3086 } else if (!(t->flags & TF_ONE_WAY)) {
3087 BUG_ON(t->buffer->async_transaction != 0);
3088 binder_inner_proc_lock(proc);
3089 t->need_reply = 1;
3090 t->from_parent = thread->transaction_stack;
3091 thread->transaction_stack = t;
3092 binder_inner_proc_unlock(proc);
3093 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3094 binder_inner_proc_lock(proc);
3095 binder_pop_transaction_ilocked(thread, t);
3096 binder_inner_proc_unlock(proc);
3097 goto err_dead_proc_or_thread;
3098 }
3099 } else {
3100 BUG_ON(target_node == NULL);
3101 BUG_ON(t->buffer->async_transaction != 1);
3102 if (!binder_proc_transaction(t, target_proc, NULL))
3103 goto err_dead_proc_or_thread;
3104 }
3105 if (target_thread)
3106 binder_thread_dec_tmpref(target_thread);
3107 binder_proc_dec_tmpref(target_proc);
3108 if (target_node)
3109 binder_dec_node_tmpref(target_node);
3110
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
3114 smp_wmb();
3115 WRITE_ONCE(e->debug_id_done, t_debug_id);
3116 return;
3117
3118err_dead_proc_or_thread:
3119 return_error = BR_DEAD_REPLY;
3120 return_error_line = __LINE__;
3121 binder_dequeue_work(proc, tcomplete);
3122err_translate_failed:
3123err_bad_object_type:
3124err_bad_offset:
3125err_bad_parent:
3126err_copy_data_failed:
3127 trace_binder_transaction_failed_buffer_release(t->buffer);
3128 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3129 if (target_node)
3130 binder_dec_node_tmpref(target_node);
3131 target_node = NULL;
3132 t->buffer->transaction = NULL;
3133 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3134err_binder_alloc_buf_failed:
3135 kfree(tcomplete);
3136 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3137err_alloc_tcomplete_failed:
3138 kfree(t);
3139 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3140err_alloc_t_failed:
3141err_bad_call_stack:
3142err_empty_call_stack:
3143err_dead_binder:
3144err_invalid_target_handle:
3145 if (target_thread)
3146 binder_thread_dec_tmpref(target_thread);
3147 if (target_proc)
3148 binder_proc_dec_tmpref(target_proc);
3149 if (target_node) {
3150 binder_dec_node(target_node, 1, 0);
3151 binder_dec_node_tmpref(target_node);
3152 }
3153
3154 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3155 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3156 proc->pid, thread->pid, return_error, return_error_param,
3157 (u64)tr->data_size, (u64)tr->offsets_size,
3158 return_error_line);
3159
3160 {
3161 struct binder_transaction_log_entry *fe;
3162
3163 e->return_error = return_error;
3164 e->return_error_param = return_error_param;
3165 e->return_error_line = return_error_line;
3166 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3167 *fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
3172 smp_wmb();
3173 WRITE_ONCE(e->debug_id_done, t_debug_id);
3174 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3175 }
3176
3177 BUG_ON(thread->return_error.cmd != BR_OK);
3178 if (in_reply_to) {
3179 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3180 binder_enqueue_work(thread->proc,
3181 &thread->return_error.work,
3182 &thread->todo);
3183 binder_send_failed_reply(in_reply_to, return_error);
3184 } else {
3185 thread->return_error.cmd = return_error;
3186 binder_enqueue_work(thread->proc,
3187 &thread->return_error.work,
3188 &thread->todo);
3189 }
3190}
3191
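/*
 * binder_thread_write() - consume BC_* commands from the write buffer
 *
 * Processes commands from the user-supplied buffer until @size bytes
 * have been consumed, a command fails, or a pending return error is
 * set on the thread. On return, *@consumed is updated to the number
 * of bytes actually processed.
 */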
3192static int binder_thread_write(struct binder_proc *proc,
3193 struct binder_thread *thread,
3194 binder_uintptr_t binder_buffer, size_t size,
3195 binder_size_t *consumed)
3196{
3197 uint32_t cmd;
3198 struct binder_context *context = proc->context;
3199 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3200 void __user *ptr = buffer + *consumed;
3201 void __user *end = buffer + size;
3202
3203 while (ptr < end && thread->return_error.cmd == BR_OK) {
3204 int ret;
3205
3206 if (get_user(cmd, (uint32_t __user *)ptr))
3207 return -EFAULT;
3208 ptr += sizeof(uint32_t);
3209 trace_binder_command(cmd);
3210 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3211 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3212 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3213 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3214 }
3215 switch (cmd) {
3216 case BC_INCREFS:
3217 case BC_ACQUIRE:
3218 case BC_RELEASE:
3219 case BC_DECREFS: {
3220 uint32_t target;
3221 const char *debug_string;
3222 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3223 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3224 struct binder_ref_data rdata;
3225
3226 if (get_user(target, (uint32_t __user *)ptr))
3227 return -EFAULT;
3228
3229 ptr += sizeof(uint32_t);
3230 ret = -1;
3231 if (increment && !target) {
3232 struct binder_node *ctx_mgr_node;
3233 mutex_lock(&context->context_mgr_node_lock);
3234 ctx_mgr_node = context->binder_context_mgr_node;
3235 if (ctx_mgr_node)
3236 ret = binder_inc_ref_for_node(
3237 proc, ctx_mgr_node,
3238 strong, NULL, &rdata);
3239 mutex_unlock(&context->context_mgr_node_lock);
3240 }
3241 if (ret)
3242 ret = binder_update_ref_for_handle(
3243 proc, target, increment, strong,
3244 &rdata);
3245 if (!ret && rdata.desc != target) {
3246 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3247 proc->pid, thread->pid,
3248 target, rdata.desc);
3249 }
3250 switch (cmd) {
3251 case BC_INCREFS:
3252 debug_string = "IncRefs";
3253 break;
3254 case BC_ACQUIRE:
3255 debug_string = "Acquire";
3256 break;
3257 case BC_RELEASE:
3258 debug_string = "Release";
3259 break;
3260 case BC_DECREFS:
3261 default:
3262 debug_string = "DecRefs";
3263 break;
3264 }
3265 if (ret) {
3266 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3267 proc->pid, thread->pid, debug_string,
3268 strong, target, ret);
3269 break;
3270 }
3271 binder_debug(BINDER_DEBUG_USER_REFS,
3272 "%d:%d %s ref %d desc %d s %d w %d\n",
3273 proc->pid, thread->pid, debug_string,
3274 rdata.debug_id, rdata.desc, rdata.strong,
3275 rdata.weak);
3276 break;
3277 }
3278 case BC_INCREFS_DONE:
3279 case BC_ACQUIRE_DONE: {
3280 binder_uintptr_t node_ptr;
3281 binder_uintptr_t cookie;
3282 struct binder_node *node;
3283 bool free_node;
3284
3285 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3286 return -EFAULT;
3287 ptr += sizeof(binder_uintptr_t);
3288 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3289 return -EFAULT;
3290 ptr += sizeof(binder_uintptr_t);
3291 node = binder_get_node(proc, node_ptr);
3292 if (node == NULL) {
3293 binder_user_error("%d:%d %s u%016llx no match\n",
3294 proc->pid, thread->pid,
3295 cmd == BC_INCREFS_DONE ?
3296 "BC_INCREFS_DONE" :
3297 "BC_ACQUIRE_DONE",
3298 (u64)node_ptr);
3299 break;
3300 }
3301 if (cookie != node->cookie) {
3302 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3303 proc->pid, thread->pid,
3304 cmd == BC_INCREFS_DONE ?
3305 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3306 (u64)node_ptr, node->debug_id,
3307 (u64)cookie, (u64)node->cookie);
3308 binder_put_node(node);
3309 break;
3310 }
3311 binder_node_inner_lock(node);
3312 if (cmd == BC_ACQUIRE_DONE) {
3313 if (node->pending_strong_ref == 0) {
3314 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3315 proc->pid, thread->pid,
3316 node->debug_id);
3317 binder_node_inner_unlock(node);
3318 binder_put_node(node);
3319 break;
3320 }
3321 node->pending_strong_ref = 0;
3322 } else {
3323 if (node->pending_weak_ref == 0) {
3324 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3325 proc->pid, thread->pid,
3326 node->debug_id);
3327 binder_node_inner_unlock(node);
3328 binder_put_node(node);
3329 break;
3330 }
3331 node->pending_weak_ref = 0;
3332 }
3333 free_node = binder_dec_node_nilocked(node,
3334 cmd == BC_ACQUIRE_DONE, 0);
3335 WARN_ON(free_node);
3336 binder_debug(BINDER_DEBUG_USER_REFS,
3337 "%d:%d %s node %d ls %d lw %d tr %d\n",
3338 proc->pid, thread->pid,
3339 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3340 node->debug_id, node->local_strong_refs,
3341 node->local_weak_refs, node->tmp_refs);
3342 binder_node_inner_unlock(node);
3343 binder_put_node(node);
3344 break;
3345 }
3346 case BC_ATTEMPT_ACQUIRE:
3347 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3348 return -EINVAL;
3349 case BC_ACQUIRE_RESULT:
3350 pr_err("BC_ACQUIRE_RESULT not supported\n");
3351 return -EINVAL;
3352
3353 case BC_FREE_BUFFER: {
3354 binder_uintptr_t data_ptr;
3355 struct binder_buffer *buffer;
3356
3357 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3358 return -EFAULT;
3359 ptr += sizeof(binder_uintptr_t);
3360
3361 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3362 data_ptr);
3363 if (buffer == NULL) {
3364 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3365 proc->pid, thread->pid, (u64)data_ptr);
3366 break;
3367 }
3368 if (!buffer->allow_user_free) {
3369 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3370 proc->pid, thread->pid, (u64)data_ptr);
3371 break;
3372 }
3373 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3374 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3375 proc->pid, thread->pid, (u64)data_ptr,
3376 buffer->debug_id,
3377 buffer->transaction ? "active" : "finished");
3378
3379 if (buffer->transaction) {
3380 buffer->transaction->buffer = NULL;
3381 buffer->transaction = NULL;
3382 }
3383 if (buffer->async_transaction && buffer->target_node) {
3384 struct binder_node *buf_node;
3385 struct binder_work *w;
3386
3387 buf_node = buffer->target_node;
3388 binder_node_inner_lock(buf_node);
3389 BUG_ON(!buf_node->has_async_transaction);
3390 BUG_ON(buf_node->proc != proc);
3391 w = binder_dequeue_work_head_ilocked(
3392 &buf_node->async_todo);
3393 if (!w) {
3394 buf_node->has_async_transaction = 0;
3395 } else {
3396 binder_enqueue_work_ilocked(
3397 w, &proc->todo);
3398 binder_wakeup_proc_ilocked(proc);
3399 }
3400 binder_node_inner_unlock(buf_node);
3401 }
3402 trace_binder_transaction_buffer_release(buffer);
3403 binder_transaction_buffer_release(proc, buffer, NULL);
3404 binder_alloc_free_buf(&proc->alloc, buffer);
3405 break;
3406 }
3407
3408 case BC_TRANSACTION_SG:
3409 case BC_REPLY_SG: {
3410 struct binder_transaction_data_sg tr;
3411
3412 if (copy_from_user(&tr, ptr, sizeof(tr)))
3413 return -EFAULT;
3414 ptr += sizeof(tr);
3415 binder_transaction(proc, thread, &tr.transaction_data,
3416 cmd == BC_REPLY_SG, tr.buffers_size);
3417 break;
3418 }
3419 case BC_TRANSACTION:
3420 case BC_REPLY: {
3421 struct binder_transaction_data tr;
3422
3423 if (copy_from_user(&tr, ptr, sizeof(tr)))
3424 return -EFAULT;
3425 ptr += sizeof(tr);
3426 binder_transaction(proc, thread, &tr,
3427 cmd == BC_REPLY, 0);
3428 break;
3429 }
3430
3431 case BC_REGISTER_LOOPER:
3432 binder_debug(BINDER_DEBUG_THREADS,
3433 "%d:%d BC_REGISTER_LOOPER\n",
3434 proc->pid, thread->pid);
3435 binder_inner_proc_lock(proc);
3436 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3437 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3438 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3439 proc->pid, thread->pid);
3440 } else if (proc->requested_threads == 0) {
3441 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3442 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3443 proc->pid, thread->pid);
3444 } else {
3445 proc->requested_threads--;
3446 proc->requested_threads_started++;
3447 }
3448 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3449 binder_inner_proc_unlock(proc);
3450 break;
3451 case BC_ENTER_LOOPER:
3452 binder_debug(BINDER_DEBUG_THREADS,
3453 "%d:%d BC_ENTER_LOOPER\n",
3454 proc->pid, thread->pid);
3455 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3456 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3457 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3458 proc->pid, thread->pid);
3459 }
3460 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3461 break;
3462 case BC_EXIT_LOOPER:
3463 binder_debug(BINDER_DEBUG_THREADS,
3464 "%d:%d BC_EXIT_LOOPER\n",
3465 proc->pid, thread->pid);
3466 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3467 break;
3468
3469 case BC_REQUEST_DEATH_NOTIFICATION:
3470 case BC_CLEAR_DEATH_NOTIFICATION: {
3471 uint32_t target;
3472 binder_uintptr_t cookie;
3473 struct binder_ref *ref;
3474 struct binder_ref_death *death = NULL;
3475
3476 if (get_user(target, (uint32_t __user *)ptr))
3477 return -EFAULT;
3478 ptr += sizeof(uint32_t);
3479 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3480 return -EFAULT;
3481 ptr += sizeof(binder_uintptr_t);
3482 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
3487 death = kzalloc(sizeof(*death), GFP_KERNEL);
3488 if (death == NULL) {
3489 WARN_ON(thread->return_error.cmd !=
3490 BR_OK);
3491 thread->return_error.cmd = BR_ERROR;
3492 binder_enqueue_work(
3493 thread->proc,
3494 &thread->return_error.work,
3495 &thread->todo);
3496 binder_debug(
3497 BINDER_DEBUG_FAILED_TRANSACTION,
3498 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3499 proc->pid, thread->pid);
3500 break;
3501 }
3502 }
3503 binder_proc_lock(proc);
3504 ref = binder_get_ref_olocked(proc, target, false);
3505 if (ref == NULL) {
3506 binder_user_error("%d:%d %s invalid ref %d\n",
3507 proc->pid, thread->pid,
3508 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3509 "BC_REQUEST_DEATH_NOTIFICATION" :
3510 "BC_CLEAR_DEATH_NOTIFICATION",
3511 target);
3512 binder_proc_unlock(proc);
3513 kfree(death);
3514 break;
3515 }
3516
3517 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3518 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3519 proc->pid, thread->pid,
3520 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3521 "BC_REQUEST_DEATH_NOTIFICATION" :
3522 "BC_CLEAR_DEATH_NOTIFICATION",
3523 (u64)cookie, ref->data.debug_id,
3524 ref->data.desc, ref->data.strong,
3525 ref->data.weak, ref->node->debug_id);
3526
3527 binder_node_lock(ref->node);
3528 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3529 if (ref->death) {
3530 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3531 proc->pid, thread->pid);
3532 binder_node_unlock(ref->node);
3533 binder_proc_unlock(proc);
3534 kfree(death);
3535 break;
3536 }
3537 binder_stats_created(BINDER_STAT_DEATH);
3538 INIT_LIST_HEAD(&death->work.entry);
3539 death->cookie = cookie;
3540 ref->death = death;
3541 if (ref->node->proc == NULL) {
3542 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3543
3544 binder_inner_proc_lock(proc);
3545 binder_enqueue_work_ilocked(
3546 &ref->death->work, &proc->todo);
3547 binder_wakeup_proc_ilocked(proc);
3548 binder_inner_proc_unlock(proc);
3549 }
3550 } else {
3551 if (ref->death == NULL) {
3552 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3553 proc->pid, thread->pid);
3554 binder_node_unlock(ref->node);
3555 binder_proc_unlock(proc);
3556 break;
3557 }
3558 death = ref->death;
3559 if (death->cookie != cookie) {
3560 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3561 proc->pid, thread->pid,
3562 (u64)death->cookie,
3563 (u64)cookie);
3564 binder_node_unlock(ref->node);
3565 binder_proc_unlock(proc);
3566 break;
3567 }
3568 ref->death = NULL;
3569 binder_inner_proc_lock(proc);
3570 if (list_empty(&death->work.entry)) {
3571 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3572 if (thread->looper &
3573 (BINDER_LOOPER_STATE_REGISTERED |
3574 BINDER_LOOPER_STATE_ENTERED))
3575 binder_enqueue_work_ilocked(
3576 &death->work,
3577 &thread->todo);
3578 else {
3579 binder_enqueue_work_ilocked(
3580 &death->work,
3581 &proc->todo);
3582 binder_wakeup_proc_ilocked(
3583 proc);
3584 }
3585 } else {
3586 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3587 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3588 }
3589 binder_inner_proc_unlock(proc);
3590 }
3591 binder_node_unlock(ref->node);
3592 binder_proc_unlock(proc);
3593 } break;
3594 case BC_DEAD_BINDER_DONE: {
3595 struct binder_work *w;
3596 binder_uintptr_t cookie;
3597 struct binder_ref_death *death = NULL;
3598
3599 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3600 return -EFAULT;
3601
3602 ptr += sizeof(cookie);
3603 binder_inner_proc_lock(proc);
3604 list_for_each_entry(w, &proc->delivered_death,
3605 entry) {
3606 struct binder_ref_death *tmp_death =
3607 container_of(w,
3608 struct binder_ref_death,
3609 work);
3610
3611 if (tmp_death->cookie == cookie) {
3612 death = tmp_death;
3613 break;
3614 }
3615 }
3616 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3617 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3618 proc->pid, thread->pid, (u64)cookie,
3619 death);
3620 if (death == NULL) {
3621 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3622 proc->pid, thread->pid, (u64)cookie);
3623 binder_inner_proc_unlock(proc);
3624 break;
3625 }
3626 binder_dequeue_work_ilocked(&death->work);
3627 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3628 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3629 if (thread->looper &
3630 (BINDER_LOOPER_STATE_REGISTERED |
3631 BINDER_LOOPER_STATE_ENTERED))
3632 binder_enqueue_work_ilocked(
3633 &death->work, &thread->todo);
3634 else {
3635 binder_enqueue_work_ilocked(
3636 &death->work,
3637 &proc->todo);
3638 binder_wakeup_proc_ilocked(proc);
3639 }
3640 }
3641 binder_inner_proc_unlock(proc);
3642 } break;
3643
3644 default:
3645 pr_err("%d:%d unknown command %d\n",
3646 proc->pid, thread->pid, cmd);
3647 return -EINVAL;
3648 }
3649 *consumed = ptr - buffer;
3650 }
3651 return 0;
3652}
3653
3654static void binder_stat_br(struct binder_proc *proc,
3655 struct binder_thread *thread, uint32_t cmd)
3656{
3657 trace_binder_return(cmd);
3658 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3659 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3660 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3661 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3662 }
3663}
3664
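/*
 * binder_put_node_cmd() - write a node-related BR_* command to user space
 *
 * Copies @cmd followed by the node's ptr and cookie to the read buffer
 * at *@ptrp, advancing *@ptrp past the written data. Used by
 * binder_thread_read() to emit BR_INCREFS/BR_ACQUIRE/BR_RELEASE/
 * BR_DECREFS commands.
 */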
3665static int binder_put_node_cmd(struct binder_proc *proc,
3666 struct binder_thread *thread,
3667 void __user **ptrp,
3668 binder_uintptr_t node_ptr,
3669 binder_uintptr_t node_cookie,
3670 int node_debug_id,
3671 uint32_t cmd, const char *cmd_name)
3672{
3673 void __user *ptr = *ptrp;
3674
3675 if (put_user(cmd, (uint32_t __user *)ptr))
3676 return -EFAULT;
3677 ptr += sizeof(uint32_t);
3678
3679 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3680 return -EFAULT;
3681 ptr += sizeof(binder_uintptr_t);
3682
3683 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3684 return -EFAULT;
3685 ptr += sizeof(binder_uintptr_t);
3686
3687 binder_stat_br(proc, thread, cmd);
3688 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3689 proc->pid, thread->pid, cmd_name, node_debug_id,
3690 (u64)node_ptr, (u64)node_cookie);
3691
3692 *ptrp = ptr;
3693 return 0;
3694}
3695
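/*
 * binder_wait_for_work() - block until there is work for this thread
 *
 * Sleeps interruptibly until binder_has_work_ilocked() reports work.
 * If @do_proc_work is true, the thread is added to the process's
 * waiting_threads list while it sleeps so it can be handed proc-wide
 * work. The freezer is told not to count this task while it waits.
 */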
3696static int binder_wait_for_work(struct binder_thread *thread,
3697 bool do_proc_work)
3698{
3699 DEFINE_WAIT(wait);
3700 struct binder_proc *proc = thread->proc;
3701 int ret = 0;
3702
3703 freezer_do_not_count();
3704 binder_inner_proc_lock(proc);
3705 for (;;) {
3706 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3707 if (binder_has_work_ilocked(thread, do_proc_work))
3708 break;
3709 if (do_proc_work)
3710 list_add(&thread->waiting_thread_node,
3711 &proc->waiting_threads);
3712 binder_inner_proc_unlock(proc);
3713 schedule();
3714 binder_inner_proc_lock(proc);
3715 list_del_init(&thread->waiting_thread_node);
3716 if (signal_pending(current)) {
3717 ret = -ERESTARTSYS;
3718 break;
3719 }
3720 }
3721 finish_wait(&thread->wait, &wait);
3722 binder_inner_proc_unlock(proc);
3723 freezer_count();
3724
3725 return ret;
3726}
3727
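/*
 * binder_thread_read() - fill the read buffer with BR_* commands
 *
 * Dequeues work items from the thread's (and, when allowed, the
 * process's) todo list and translates them into BR_* commands and
 * transaction data copied to the user buffer. Blocks according to
 * @non_block. May also append BR_SPAWN_LOOPER to ask user space to
 * start another looper thread.
 */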
3728static int binder_thread_read(struct binder_proc *proc,
3729 struct binder_thread *thread,
3730 binder_uintptr_t binder_buffer, size_t size,
3731 binder_size_t *consumed, int non_block)
3732{
3733 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3734 void __user *ptr = buffer + *consumed;
3735 void __user *end = buffer + size;
3736
3737 int ret = 0;
3738 int wait_for_proc_work;
3739
3740 if (*consumed == 0) {
3741 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3742 return -EFAULT;
3743 ptr += sizeof(uint32_t);
3744 }
3745
3746retry:
3747 binder_inner_proc_lock(proc);
3748 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3749 binder_inner_proc_unlock(proc);
3750
3751 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3752
3753 trace_binder_wait_for_work(wait_for_proc_work,
3754 !!thread->transaction_stack,
3755 !binder_worklist_empty(proc, &thread->todo));
3756 if (wait_for_proc_work) {
3757 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3758 BINDER_LOOPER_STATE_ENTERED))) {
3759 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3760 proc->pid, thread->pid, thread->looper);
3761 wait_event_interruptible(binder_user_error_wait,
3762 binder_stop_on_user_error < 2);
3763 }
3764 binder_set_nice(proc->default_priority);
3765 }
3766
3767 if (non_block) {
3768 if (!binder_has_work(thread, wait_for_proc_work))
3769 ret = -EAGAIN;
3770 } else {
3771 ret = binder_wait_for_work(thread, wait_for_proc_work);
3772 }
3773
3774 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3775
3776 if (ret)
3777 return ret;
3778
3779 while (1) {
3780 uint32_t cmd;
3781 struct binder_transaction_data tr;
3782 struct binder_work *w = NULL;
3783 struct list_head *list = NULL;
3784 struct binder_transaction *t = NULL;
3785 struct binder_thread *t_from;
3786
3787 binder_inner_proc_lock(proc);
3788 if (!binder_worklist_empty_ilocked(&thread->todo))
3789 list = &thread->todo;
3790 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3791 wait_for_proc_work)
3792 list = &proc->todo;
3793 else {
3794 binder_inner_proc_unlock(proc);
3795
			/* no data added */
3797 if (ptr - buffer == 4 && !thread->looper_need_return)
3798 goto retry;
3799 break;
3800 }
3801
3802 if (end - ptr < sizeof(tr) + 4) {
3803 binder_inner_proc_unlock(proc);
3804 break;
3805 }
3806 w = binder_dequeue_work_head_ilocked(list);
3807
3808 switch (w->type) {
3809 case BINDER_WORK_TRANSACTION: {
3810 binder_inner_proc_unlock(proc);
3811 t = container_of(w, struct binder_transaction, work);
3812 } break;
3813 case BINDER_WORK_RETURN_ERROR: {
3814 struct binder_error *e = container_of(
3815 w, struct binder_error, work);
3816
3817 WARN_ON(e->cmd == BR_OK);
3818 binder_inner_proc_unlock(proc);
3819 if (put_user(e->cmd, (uint32_t __user *)ptr))
3820 return -EFAULT;
3821 e->cmd = BR_OK;
3822 ptr += sizeof(uint32_t);
3823
3824 binder_stat_br(proc, thread, e->cmd);
3825 } break;
3826 case BINDER_WORK_TRANSACTION_COMPLETE: {
3827 binder_inner_proc_unlock(proc);
3828 cmd = BR_TRANSACTION_COMPLETE;
3829 if (put_user(cmd, (uint32_t __user *)ptr))
3830 return -EFAULT;
3831 ptr += sizeof(uint32_t);
3832
3833 binder_stat_br(proc, thread, cmd);
3834 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3835 "%d:%d BR_TRANSACTION_COMPLETE\n",
3836 proc->pid, thread->pid);
3837 kfree(w);
3838 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3839 } break;
3840 case BINDER_WORK_NODE: {
3841 struct binder_node *node = container_of(w, struct binder_node, work);
3842 int strong, weak;
3843 binder_uintptr_t node_ptr = node->ptr;
3844 binder_uintptr_t node_cookie = node->cookie;
3845 int node_debug_id = node->debug_id;
3846 int has_weak_ref;
3847 int has_strong_ref;
3848 void __user *orig_ptr = ptr;
3849
3850 BUG_ON(proc != node->proc);
3851 strong = node->internal_strong_refs ||
3852 node->local_strong_refs;
3853 weak = !hlist_empty(&node->refs) ||
3854 node->local_weak_refs ||
3855 node->tmp_refs || strong;
3856 has_strong_ref = node->has_strong_ref;
3857 has_weak_ref = node->has_weak_ref;
3858
3859 if (weak && !has_weak_ref) {
3860 node->has_weak_ref = 1;
3861 node->pending_weak_ref = 1;
3862 node->local_weak_refs++;
3863 }
3864 if (strong && !has_strong_ref) {
3865 node->has_strong_ref = 1;
3866 node->pending_strong_ref = 1;
3867 node->local_strong_refs++;
3868 }
3869 if (!strong && has_strong_ref)
3870 node->has_strong_ref = 0;
3871 if (!weak && has_weak_ref)
3872 node->has_weak_ref = 0;
3873 if (!weak && !strong) {
3874 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3875 "%d:%d node %d u%016llx c%016llx deleted\n",
3876 proc->pid, thread->pid,
3877 node_debug_id,
3878 (u64)node_ptr,
3879 (u64)node_cookie);
3880 rb_erase(&node->rb_node, &proc->nodes);
3881 binder_inner_proc_unlock(proc);
3882 binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
3892 binder_node_unlock(node);
3893 binder_free_node(node);
3894 } else
3895 binder_inner_proc_unlock(proc);
3896
3897 if (weak && !has_weak_ref)
3898 ret = binder_put_node_cmd(
3899 proc, thread, &ptr, node_ptr,
3900 node_cookie, node_debug_id,
3901 BR_INCREFS, "BR_INCREFS");
3902 if (!ret && strong && !has_strong_ref)
3903 ret = binder_put_node_cmd(
3904 proc, thread, &ptr, node_ptr,
3905 node_cookie, node_debug_id,
3906 BR_ACQUIRE, "BR_ACQUIRE");
3907 if (!ret && !strong && has_strong_ref)
3908 ret = binder_put_node_cmd(
3909 proc, thread, &ptr, node_ptr,
3910 node_cookie, node_debug_id,
3911 BR_RELEASE, "BR_RELEASE");
3912 if (!ret && !weak && has_weak_ref)
3913 ret = binder_put_node_cmd(
3914 proc, thread, &ptr, node_ptr,
3915 node_cookie, node_debug_id,
3916 BR_DECREFS, "BR_DECREFS");
3917 if (orig_ptr == ptr)
3918 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3919 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3920 proc->pid, thread->pid,
3921 node_debug_id,
3922 (u64)node_ptr,
3923 (u64)node_cookie);
3924 if (ret)
3925 return ret;
3926 } break;
3927 case BINDER_WORK_DEAD_BINDER:
3928 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3929 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3930 struct binder_ref_death *death;
3931 uint32_t cmd;
3932 binder_uintptr_t cookie;
3933
3934 death = container_of(w, struct binder_ref_death, work);
3935 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3936 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3937 else
3938 cmd = BR_DEAD_BINDER;
3939 cookie = death->cookie;
3940
3941 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3942 "%d:%d %s %016llx\n",
3943 proc->pid, thread->pid,
3944 cmd == BR_DEAD_BINDER ?
3945 "BR_DEAD_BINDER" :
3946 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3947 (u64)cookie);
3948 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3949 binder_inner_proc_unlock(proc);
3950 kfree(death);
3951 binder_stats_deleted(BINDER_STAT_DEATH);
3952 } else {
3953 binder_enqueue_work_ilocked(
3954 w, &proc->delivered_death);
3955 binder_inner_proc_unlock(proc);
3956 }
3957 if (put_user(cmd, (uint32_t __user *)ptr))
3958 return -EFAULT;
3959 ptr += sizeof(uint32_t);
3960 if (put_user(cookie,
3961 (binder_uintptr_t __user *)ptr))
3962 return -EFAULT;
3963 ptr += sizeof(binder_uintptr_t);
3964 binder_stat_br(proc, thread, cmd);
3965 if (cmd == BR_DEAD_BINDER)
3966 goto done;
3967 } break;
3968 }
3969
3970 if (!t)
3971 continue;
3972
3973 BUG_ON(t->buffer == NULL);
3974 if (t->buffer->target_node) {
3975 struct binder_node *target_node = t->buffer->target_node;
3976
3977 tr.target.ptr = target_node->ptr;
3978 tr.cookie = target_node->cookie;
3979 t->saved_priority = task_nice(current);
3980 if (t->priority < target_node->min_priority &&
3981 !(t->flags & TF_ONE_WAY))
3982 binder_set_nice(t->priority);
3983 else if (!(t->flags & TF_ONE_WAY) ||
3984 t->saved_priority > target_node->min_priority)
3985 binder_set_nice(target_node->min_priority);
3986 cmd = BR_TRANSACTION;
3987 } else {
3988 tr.target.ptr = 0;
3989 tr.cookie = 0;
3990 cmd = BR_REPLY;
3991 }
3992 tr.code = t->code;
3993 tr.flags = t->flags;
3994 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
3995
3996 t_from = binder_get_txn_from(t);
3997 if (t_from) {
3998 struct task_struct *sender = t_from->proc->tsk;
3999
4000 tr.sender_pid = task_tgid_nr_ns(sender,
4001 task_active_pid_ns(current));
4002 } else {
4003 tr.sender_pid = 0;
4004 }
4005
4006 tr.data_size = t->buffer->data_size;
4007 tr.offsets_size = t->buffer->offsets_size;
4008 tr.data.ptr.buffer = (binder_uintptr_t)
4009 ((uintptr_t)t->buffer->data +
4010 binder_alloc_get_user_buffer_offset(&proc->alloc));
4011 tr.data.ptr.offsets = tr.data.ptr.buffer +
4012 ALIGN(t->buffer->data_size,
4013 sizeof(void *));
4014
4015 if (put_user(cmd, (uint32_t __user *)ptr)) {
4016 if (t_from)
4017 binder_thread_dec_tmpref(t_from);
4018 return -EFAULT;
4019 }
4020 ptr += sizeof(uint32_t);
4021 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4022 if (t_from)
4023 binder_thread_dec_tmpref(t_from);
4024 return -EFAULT;
4025 }
4026 ptr += sizeof(tr);
4027
4028 trace_binder_transaction_received(t);
4029 binder_stat_br(proc, thread, cmd);
4030 binder_debug(BINDER_DEBUG_TRANSACTION,
4031 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4032 proc->pid, thread->pid,
4033 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4034 "BR_REPLY",
4035 t->debug_id, t_from ? t_from->proc->pid : 0,
4036 t_from ? t_from->pid : 0, cmd,
4037 t->buffer->data_size, t->buffer->offsets_size,
4038 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4039
4040 if (t_from)
4041 binder_thread_dec_tmpref(t_from);
4042 t->buffer->allow_user_free = 1;
4043 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4044 binder_inner_proc_lock(thread->proc);
4045 t->to_parent = thread->transaction_stack;
4046 t->to_thread = thread;
4047 thread->transaction_stack = t;
4048 binder_inner_proc_unlock(thread->proc);
4049 } else {
4050 binder_free_transaction(t);
4051 }
4052 break;
4053 }
4054
4055done:
4056
4057 *consumed = ptr - buffer;
4058 binder_inner_proc_lock(proc);
4059 if (proc->requested_threads == 0 &&
4060 list_empty(&thread->proc->waiting_threads) &&
4061 proc->requested_threads_started < proc->max_threads &&
4062 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* user space fails to spawn a new thread if we leave this out */) {
4065 proc->requested_threads++;
4066 binder_inner_proc_unlock(proc);
4067 binder_debug(BINDER_DEBUG_THREADS,
4068 "%d:%d BR_SPAWN_LOOPER\n",
4069 proc->pid, thread->pid);
4070 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4071 return -EFAULT;
4072 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4073 } else
4074 binder_inner_proc_unlock(proc);
4075 return 0;
4076}
4077
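/*
 * binder_release_work() - discard undelivered work items
 *
 * Drains @list, sending failed replies for undelivered two-way
 * transactions and freeing any other queued work. Called when a
 * thread or process is being torn down.
 */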
4078static void binder_release_work(struct binder_proc *proc,
4079 struct list_head *list)
4080{
4081 struct binder_work *w;
4082
4083 while (1) {
4084 w = binder_dequeue_work_head(proc, list);
4085 if (!w)
4086 return;
4087
4088 switch (w->type) {
4089 case BINDER_WORK_TRANSACTION: {
4090 struct binder_transaction *t;
4091
4092 t = container_of(w, struct binder_transaction, work);
4093 if (t->buffer->target_node &&
4094 !(t->flags & TF_ONE_WAY)) {
4095 binder_send_failed_reply(t, BR_DEAD_REPLY);
4096 } else {
4097 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4098 "undelivered transaction %d\n",
4099 t->debug_id);
4100 binder_free_transaction(t);
4101 }
4102 } break;
4103 case BINDER_WORK_RETURN_ERROR: {
4104 struct binder_error *e = container_of(
4105 w, struct binder_error, work);
4106
4107 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4108 "undelivered TRANSACTION_ERROR: %u\n",
4109 e->cmd);
4110 } break;
4111 case BINDER_WORK_TRANSACTION_COMPLETE: {
4112 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4113 "undelivered TRANSACTION_COMPLETE\n");
4114 kfree(w);
4115 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4116 } break;
4117 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4118 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4119 struct binder_ref_death *death;
4120
4121 death = container_of(w, struct binder_ref_death, work);
4122 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4123 "undelivered death notification, %016llx\n",
4124 (u64)death->cookie);
4125 kfree(death);
4126 binder_stats_deleted(BINDER_STAT_DEATH);
4127 } break;
4128 default:
4129 pr_err("unexpected work type, %d, not freed\n",
4130 w->type);
4131 break;
4132 }
4133 }
}
4136
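/*
 * binder_get_thread_ilocked() - look up the binder_thread for current
 *
 * Searches proc->threads (an rbtree keyed by pid) for the calling
 * task. If not found and @new_thread is non-NULL, @new_thread is
 * initialized and inserted instead. The caller must hold the inner
 * proc lock.
 */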
4137static struct binder_thread *binder_get_thread_ilocked(
4138 struct binder_proc *proc, struct binder_thread *new_thread)
4139{
4140 struct binder_thread *thread = NULL;
4141 struct rb_node *parent = NULL;
4142 struct rb_node **p = &proc->threads.rb_node;
4143
4144 while (*p) {
4145 parent = *p;
4146 thread = rb_entry(parent, struct binder_thread, rb_node);
4147
4148 if (current->pid < thread->pid)
4149 p = &(*p)->rb_left;
4150 else if (current->pid > thread->pid)
4151 p = &(*p)->rb_right;
4152 else
4153 return thread;
4154 }
4155 if (!new_thread)
4156 return NULL;
4157 thread = new_thread;
4158 binder_stats_created(BINDER_STAT_THREAD);
4159 thread->proc = proc;
4160 thread->pid = current->pid;
4161 atomic_set(&thread->tmp_ref, 0);
4162 init_waitqueue_head(&thread->wait);
4163 INIT_LIST_HEAD(&thread->todo);
4164 rb_link_node(&thread->rb_node, parent, p);
4165 rb_insert_color(&thread->rb_node, &proc->threads);
4166 thread->looper_need_return = true;
4167 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4168 thread->return_error.cmd = BR_OK;
4169 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4170 thread->reply_error.cmd = BR_OK;
4171 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4172 return thread;
4173}
4174
4175static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4176{
4177 struct binder_thread *thread;
4178 struct binder_thread *new_thread;
4179
4180 binder_inner_proc_lock(proc);
4181 thread = binder_get_thread_ilocked(proc, NULL);
4182 binder_inner_proc_unlock(proc);
4183 if (!thread) {
4184 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4185 if (new_thread == NULL)
4186 return NULL;
4187 binder_inner_proc_lock(proc);
4188 thread = binder_get_thread_ilocked(proc, new_thread);
4189 binder_inner_proc_unlock(proc);
4190 if (thread != new_thread)
4191 kfree(new_thread);
4192 }
4193 return thread;
4194}
4195
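/*
 * binder_free_proc() - free a binder_proc once it is no longer referenced
 *
 * Called when the last temporary reference is dropped; releases the
 * allocator state and the proc's task reference.
 */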
4196static void binder_free_proc(struct binder_proc *proc)
4197{
4198 BUG_ON(!list_empty(&proc->todo));
4199 BUG_ON(!list_empty(&proc->delivered_death));
4200 binder_alloc_deferred_release(&proc->alloc);
4201 put_task_struct(proc->tsk);
4202 binder_stats_deleted(BINDER_STAT_PROC);
4203 kfree(proc);
4204}
4205
4206static void binder_free_thread(struct binder_thread *thread)
4207{
4208 BUG_ON(!list_empty(&thread->todo));
4209 binder_stats_deleted(BINDER_STAT_THREAD);
4210 binder_proc_dec_tmpref(thread->proc);
4211 kfree(thread);
4212}
4213
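/*
 * binder_thread_release() - clean up a thread that is exiting
 *
 * Removes the thread from proc->threads, marks it dead, unwinds its
 * transaction stack (sending BR_DEAD_REPLY for a transaction awaiting
 * our reply), and releases any work still queued on the thread.
 * Returns the number of transactions that were still active.
 */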
4214static int binder_thread_release(struct binder_proc *proc,
4215 struct binder_thread *thread)
4216{
4217 struct binder_transaction *t;
4218 struct binder_transaction *send_reply = NULL;
4219 int active_transactions = 0;
4220 struct binder_transaction *last_t = NULL;
4221
4222 binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
4229 proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
4234 atomic_inc(&thread->tmp_ref);
4235 rb_erase(&thread->rb_node, &proc->threads);
4236 t = thread->transaction_stack;
4237 if (t) {
4238 spin_lock(&t->lock);
4239 if (t->to_thread == thread)
4240 send_reply = t;
4241 }
4242 thread->is_dead = true;
4243
4244 while (t) {
4245 last_t = t;
4246 active_transactions++;
4247 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4248 "release %d:%d transaction %d %s, still active\n",
4249 proc->pid, thread->pid,
4250 t->debug_id,
4251 (t->to_thread == thread) ? "in" : "out");
4252
4253 if (t->to_thread == thread) {
4254 t->to_proc = NULL;
4255 t->to_thread = NULL;
4256 if (t->buffer) {
4257 t->buffer->transaction = NULL;
4258 t->buffer = NULL;
4259 }
4260 t = t->to_parent;
4261 } else if (t->from == thread) {
4262 t->from = NULL;
4263 t = t->from_parent;
4264 } else
4265 BUG();
4266 spin_unlock(&last_t->lock);
4267 if (t)
4268 spin_lock(&t->lock);
4269 }
4270 binder_inner_proc_unlock(thread->proc);
4271
4272 if (send_reply)
4273 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4274 binder_release_work(proc, &thread->todo);
4275 binder_thread_dec_tmpref(thread);
4276 return active_transactions;
4277}
4278
4279static unsigned int binder_poll(struct file *filp,
4280 struct poll_table_struct *wait)
4281{
4282 struct binder_proc *proc = filp->private_data;
4283 struct binder_thread *thread = NULL;
4284 bool wait_for_proc_work;
4285
	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

4288 binder_inner_proc_lock(thread->proc);
4289 thread->looper |= BINDER_LOOPER_STATE_POLL;
4290 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4291
4292 binder_inner_proc_unlock(thread->proc);
4293
4294 poll_wait(filp, &thread->wait, wait);
4295
4296 if (binder_has_work(thread, wait_for_proc_work))
4297 return POLLIN;
4298
4299 return 0;
4300}
4301
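/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 *
 * Copies in a struct binder_write_read, processes the write buffer
 * via binder_thread_write(), then fills the read buffer via
 * binder_thread_read(), and copies the updated consumed counts back
 * to user space.
 */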
4302static int binder_ioctl_write_read(struct file *filp,
4303 unsigned int cmd, unsigned long arg,
4304 struct binder_thread *thread)
4305{
4306 int ret = 0;
4307 struct binder_proc *proc = filp->private_data;
4308 unsigned int size = _IOC_SIZE(cmd);
4309 void __user *ubuf = (void __user *)arg;
4310 struct binder_write_read bwr;
4311
4312 if (size != sizeof(struct binder_write_read)) {
4313 ret = -EINVAL;
4314 goto out;
4315 }
4316 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4317 ret = -EFAULT;
4318 goto out;
4319 }
4320 binder_debug(BINDER_DEBUG_READ_WRITE,
4321 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4322 proc->pid, thread->pid,
4323 (u64)bwr.write_size, (u64)bwr.write_buffer,
4324 (u64)bwr.read_size, (u64)bwr.read_buffer);
4325
4326 if (bwr.write_size > 0) {
4327 ret = binder_thread_write(proc, thread,
4328 bwr.write_buffer,
4329 bwr.write_size,
4330 &bwr.write_consumed);
4331 trace_binder_write_done(ret);
4332 if (ret < 0) {
4333 bwr.read_consumed = 0;
4334 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4335 ret = -EFAULT;
4336 goto out;
4337 }
4338 }
4339 if (bwr.read_size > 0) {
4340 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4341 bwr.read_size,
4342 &bwr.read_consumed,
4343 filp->f_flags & O_NONBLOCK);
4344 trace_binder_read_done(ret);
4345 binder_inner_proc_lock(proc);
4346 if (!binder_worklist_empty_ilocked(&proc->todo))
4347 binder_wakeup_proc_ilocked(proc);
4348 binder_inner_proc_unlock(proc);
4349 if (ret < 0) {
4350 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4351 ret = -EFAULT;
4352 goto out;
4353 }
4354 }
4355 binder_debug(BINDER_DEBUG_READ_WRITE,
4356 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4357 proc->pid, thread->pid,
4358 (u64)bwr.write_consumed, (u64)bwr.write_size,
4359 (u64)bwr.read_consumed, (u64)bwr.read_size);
4360 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4361 ret = -EFAULT;
4362 goto out;
4363 }
4364out:
4365 return ret;
4366}
4367
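/*
 * binder_ioctl_set_ctx_mgr() - register the context manager
 *
 * Makes the calling process the context manager (handle 0) for this
 * binder context, after a security check and a check that either no
 * manager uid has been recorded yet or the caller's euid matches it.
 */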
4368static int binder_ioctl_set_ctx_mgr(struct file *filp)
4369{
4370 int ret = 0;
4371 struct binder_proc *proc = filp->private_data;
4372 struct binder_context *context = proc->context;
4373 struct binder_node *new_node;
4374 kuid_t curr_euid = current_euid();
4375
4376 mutex_lock(&context->context_mgr_node_lock);
4377 if (context->binder_context_mgr_node) {
4378 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4379 ret = -EBUSY;
4380 goto out;
4381 }
4382 ret = security_binder_set_context_mgr(proc->tsk);
4383 if (ret < 0)
4384 goto out;
4385 if (uid_valid(context->binder_context_mgr_uid)) {
4386 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4387 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4388 from_kuid(&init_user_ns, curr_euid),
4389 from_kuid(&init_user_ns,
4390 context->binder_context_mgr_uid));
4391 ret = -EPERM;
4392 goto out;
4393 }
4394 } else {
4395 context->binder_context_mgr_uid = curr_euid;
4396 }
4397 new_node = binder_new_node(proc, NULL);
4398 if (!new_node) {
4399 ret = -ENOMEM;
4400 goto out;
4401 }
4402 binder_node_lock(new_node);
4403 new_node->local_weak_refs++;
4404 new_node->local_strong_refs++;
4405 new_node->has_strong_ref = 1;
4406 new_node->has_weak_ref = 1;
4407 context->binder_context_mgr_node = new_node;
4408 binder_node_unlock(new_node);
4409 binder_put_node(new_node);
4410out:
4411 mutex_unlock(&context->context_mgr_node_lock);
4412 return ret;
4413}
4414
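/*
 * binder_ioctl_get_node_debug_info() - report the next node after @info->ptr
 *
 * Scans this process's nodes in ptr order and fills @info with the
 * first node whose ptr is strictly greater than the one passed in,
 * allowing user space to iterate over all nodes.
 */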
4415static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4416 struct binder_node_debug_info *info)
4417{
4418 struct rb_node *n;
4419 binder_uintptr_t ptr = info->ptr;
4420
4421 memset(info, 0, sizeof(*info));
4422
4423 binder_inner_proc_lock(proc);
4424 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4425 struct binder_node *node = rb_entry(n, struct binder_node,
4426 rb_node);
4427 if (node->ptr > ptr) {
4428 info->ptr = node->ptr;
4429 info->cookie = node->cookie;
4430 info->has_strong_ref = node->has_strong_ref;
4431 info->has_weak_ref = node->has_weak_ref;
4432 break;
4433 }
4434 }
4435 binder_inner_proc_unlock(proc);
4436
4437 return 0;
4438}
4439
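/*
 * binder_ioctl() - top-level ioctl dispatcher
 *
 * Blocks while a user-error stop is in effect, resolves the calling
 * binder_thread, then dispatches BINDER_WRITE_READ,
 * BINDER_SET_MAX_THREADS, BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT,
 * BINDER_VERSION and BINDER_GET_NODE_DEBUG_INFO.
 */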
4440static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4441{
4442 int ret;
4443 struct binder_proc *proc = filp->private_data;
4444 struct binder_thread *thread;
4445 unsigned int size = _IOC_SIZE(cmd);
4446 void __user *ubuf = (void __user *)arg;
4447
	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EFAULT;	/* a failed user copy is a fault, not a bad argument */
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
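
/*
 * Usage sketch (illustrative only): the simpler commands dispatched
 * above take a plain pointer argument. A fresh client typically
 * validates the protocol version and sizes its thread pool before
 * issuing any transactions:
 *
 *	struct binder_version vers;
 *	int max_threads = 15;	// arbitrary example value
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		...bail out, incompatible driver...
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */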

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
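
/*
 * Usage sketch (illustrative only): the transaction buffer must be
 * mapped read-only (writable mappings are rejected through
 * FORBIDDEN_MMAP_FLAGS above, and VM_MAYWRITE is cleared) and is capped
 * at 4MB. A libbinder-style client maps it right after opening the
 * device, with a caller-chosen map_size:
 *
 *	base = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * The kernel fills this area when delivering transactions; userspace
 * only reads from it and releases buffers with BC_FREE_BUFFER.
 */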

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * will make no difference.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
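
/*
 * Usage sketch (illustrative only): a client obtains its binder_proc by
 * opening one of the device nodes registered at init time, e.g.:
 *
 *	fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 * Each open() creates a distinct binder_proc tied to the caller's thread
 * group leader, so a process normally opens the driver once and shares
 * the descriptor across its threads.
 */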

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc);

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	/* log->cur wraps; once the ring has filled, dump all entries oldest-first */
	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
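
/*
 * Configuration sketch (illustrative only): each name handed to
 * init_binder_device() becomes its own misc character device with an
 * independent context manager. The names come from the comma-separated
 * "devices" module parameter tokenized in binder_init() below; on a
 * typical Android system (assuming the driver is built in under the
 * usual "binder" module name) the kernel command line might carry:
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * which yields /dev/binder, /dev/hwbinder and /dev/vndbinder.
 */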

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}
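
/*
 * Inspection sketch (illustrative only): with debugfs mounted at the
 * conventional /sys/kernel/debug, the files created above can be read
 * directly, e.g.:
 *
 *	# cat /sys/kernel/debug/binder/state
 *	# cat /sys/kernel/debug/binder/transaction_log
 *
 * plus one file per client under /sys/kernel/debug/binder/proc/<pid>.
 */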

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");