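// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */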
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
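	/*
	 * write-barrier to synchronize access to e->debug_id_done:
	 * make sure the initialized 0 value is seen by readers
	 * before the entry is reused and zeroed by memset().
	 */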
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

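/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */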
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

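/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @txn_security_ctx:     send the security context of the sender
 *                        with transactions to this node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */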
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
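/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */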
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

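/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */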
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

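/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */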
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

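/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */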
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

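/**
 * struct binder_txn_fd_fixup - transaction fd fixup
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since fd's are allocated
 * in the pathway of the target process, we pass the fd fixups in a list
 * in the transaction and handle them when we reach the target process.
 */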
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

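/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */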
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @_proc:        struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

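/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */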
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

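/**
 * binder_wakeup_thread_ilocked() - wakes a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a
 * thread to handle the work (if they don't have a thread
 * already), and pass the result into the @thread parameter.
 */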
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						   struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See the note on
			 * binder_enqueue_deferred_thread_work_ilocked()
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
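/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */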
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
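/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */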
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
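/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		that case.
 */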
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
1496
1497static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498{
1499 bool delete_node = false;
1500
1501 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502 "%d delete ref %d desc %d for node %d\n",
1503 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504 ref->node->debug_id);
1505
1506 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508
1509 binder_node_inner_lock(ref->node);
1510 if (ref->data.strong)
1511 binder_dec_node_nilocked(ref->node, 1, 1);
1512
1513 hlist_del(&ref->node_entry);
1514 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515 binder_node_inner_unlock(ref->node);
1516
1517
1518
1519 if (!delete_node) {
1520
1521
1522
1523
1524
1525 ref->node = NULL;
1526 }
1527
1528 if (ref->death) {
1529 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530 "%d delete ref %d desc %d has death notification\n",
1531 ref->proc->pid, ref->data.debug_id,
1532 ref->data.desc);
1533 binder_dequeue_work(ref->proc, &ref->death->work);
1534 binder_stats_deleted(BINDER_STAT_DEATH);
1535 }
1536 binder_stats_deleted(BINDER_STAT_REF);
1537}
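/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */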
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
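/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */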
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
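/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */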
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
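/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */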
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
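/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */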
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
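/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */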
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
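/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */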
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
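/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */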
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
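/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */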
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
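/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */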
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
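/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */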
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
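/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */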
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		} else {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
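/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */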
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
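/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */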
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
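/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid.
 */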
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
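/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */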
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
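/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */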
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

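/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fput() outside of a syscall context. Hence, a list of fds is
 * passed to task_work to be handled after the syscall returns.
 */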
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
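/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */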
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	__close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, true);
	} else {
		kfree(twcb);
	}
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
			WARN_ON(failed_at &&
				proc->tsk == current->group_leader);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the out-going buffer where the fd_array is stored.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err)
					binder_deferred_fd_close(fd);
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the out-going buffer where the fd_array is stored.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret < 0)
			return ret;
	}
	return 0;
}
2677
2678static int binder_fixup_parent(struct binder_transaction *t,
2679 struct binder_thread *thread,
2680 struct binder_buffer_object *bp,
2681 binder_size_t off_start_offset,
2682 binder_size_t num_valid,
2683 binder_size_t last_fixup_obj_off,
2684 binder_size_t last_fixup_min_off)
2685{
2686 struct binder_buffer_object *parent;
2687 struct binder_buffer *b = t->buffer;
2688 struct binder_proc *proc = thread->proc;
2689 struct binder_proc *target_proc = t->to_proc;
2690 struct binder_object object;
2691 binder_size_t buffer_offset;
2692 binder_size_t parent_offset;
2693
2694 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2695 return 0;
2696
2697 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2698 off_start_offset, &parent_offset,
2699 num_valid);
2700 if (!parent) {
2701 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2702 proc->pid, thread->pid);
2703 return -EINVAL;
2704 }
2705
2706 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2707 parent_offset, bp->parent_offset,
2708 last_fixup_obj_off,
2709 last_fixup_min_off)) {
2710 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2711 proc->pid, thread->pid);
2712 return -EINVAL;
2713 }
2714
2715 if (parent->length < sizeof(binder_uintptr_t) ||
2716 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
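		/* No space for a pointer here! */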
2718 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2719 proc->pid, thread->pid);
2720 return -EINVAL;
2721 }
2722 buffer_offset = bp->parent_offset +
2723 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2724 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2725 &bp->buffer, sizeof(bp->buffer))) {
2726 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2727 proc->pid, thread->pid);
2728 return -EINVAL;
2729 }
2730
2731 return 0;
2732}
2733
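/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to
 *		(may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the thread and that specific thread is woken up.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */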
2751static bool binder_proc_transaction(struct binder_transaction *t,
2752 struct binder_proc *proc,
2753 struct binder_thread *thread)
2754{
2755 struct binder_node *node = t->buffer->target_node;
2756 bool oneway = !!(t->flags & TF_ONE_WAY);
2757 bool pending_async = false;
2758
2759 BUG_ON(!node);
2760 binder_node_lock(node);
2761 if (oneway) {
2762 BUG_ON(thread);
2763 if (node->has_async_transaction) {
2764 pending_async = true;
2765 } else {
2766 node->has_async_transaction = true;
2767 }
2768 }
2769
2770 binder_inner_proc_lock(proc);
2771
2772 if (proc->is_dead || (thread && thread->is_dead)) {
2773 binder_inner_proc_unlock(proc);
2774 binder_node_unlock(node);
2775 return false;
2776 }
2777
2778 if (!thread && !pending_async)
2779 thread = binder_select_thread_ilocked(proc);
2780
2781 if (thread)
2782 binder_enqueue_thread_work_ilocked(thread, &t->work);
2783 else if (!pending_async)
2784 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2785 else
2786 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2787
2788 if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2790
2791 binder_inner_proc_unlock(proc);
2792 binder_node_unlock(node);
2793
2794 return true;
2795}
2796
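/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL
 * (the target proc has died); in that case *@error is set to
 * BR_DEAD_REPLY. *@procp is set when the return is non-NULL.
 */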
2818static struct binder_node *binder_get_node_refs_for_txn(
2819 struct binder_node *node,
2820 struct binder_proc **procp,
2821 uint32_t *error)
2822{
2823 struct binder_node *target_node = NULL;
2824
2825 binder_node_inner_lock(node);
2826 if (node->proc) {
2827 target_node = node;
2828 binder_inc_node_nilocked(node, 1, 0, NULL);
2829 binder_inc_node_tmpref_ilocked(node);
2830 node->proc->tmp_ref++;
2831 *procp = node->proc;
2832 } else
2833 *error = BR_DEAD_REPLY;
2834 binder_node_inner_unlock(node);
2835
2836 return target_node;
2837}
2838
2839static void binder_transaction(struct binder_proc *proc,
2840 struct binder_thread *thread,
2841 struct binder_transaction_data *tr, int reply,
2842 binder_size_t extra_buffers_size)
2843{
2844 int ret;
2845 struct binder_transaction *t;
2846 struct binder_work *w;
2847 struct binder_work *tcomplete;
2848 binder_size_t buffer_offset = 0;
2849 binder_size_t off_start_offset, off_end_offset;
2850 binder_size_t off_min;
2851 binder_size_t sg_buf_offset, sg_buf_end_offset;
2852 struct binder_proc *target_proc = NULL;
2853 struct binder_thread *target_thread = NULL;
2854 struct binder_node *target_node = NULL;
2855 struct binder_transaction *in_reply_to = NULL;
2856 struct binder_transaction_log_entry *e;
2857 uint32_t return_error = 0;
2858 uint32_t return_error_param = 0;
2859 uint32_t return_error_line = 0;
2860 binder_size_t last_fixup_obj_off = 0;
2861 binder_size_t last_fixup_min_off = 0;
2862 struct binder_context *context = proc->context;
2863 int t_debug_id = atomic_inc_return(&binder_last_id);
2864 char *secctx = NULL;
2865 u32 secctx_sz = 0;
2866
2867 e = binder_transaction_log_add(&binder_transaction_log);
2868 e->debug_id = t_debug_id;
2869 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2870 e->from_proc = proc->pid;
2871 e->from_thread = thread->pid;
2872 e->target_handle = tr->target.handle;
2873 e->data_size = tr->data_size;
2874 e->offsets_size = tr->offsets_size;
2875 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2876
2877 if (reply) {
2878 binder_inner_proc_lock(proc);
2879 in_reply_to = thread->transaction_stack;
2880 if (in_reply_to == NULL) {
2881 binder_inner_proc_unlock(proc);
2882 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2883 proc->pid, thread->pid);
2884 return_error = BR_FAILED_REPLY;
2885 return_error_param = -EPROTO;
2886 return_error_line = __LINE__;
2887 goto err_empty_call_stack;
2888 }
2889 if (in_reply_to->to_thread != thread) {
2890 spin_lock(&in_reply_to->lock);
2891 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2892 proc->pid, thread->pid, in_reply_to->debug_id,
2893 in_reply_to->to_proc ?
2894 in_reply_to->to_proc->pid : 0,
2895 in_reply_to->to_thread ?
2896 in_reply_to->to_thread->pid : 0);
2897 spin_unlock(&in_reply_to->lock);
2898 binder_inner_proc_unlock(proc);
2899 return_error = BR_FAILED_REPLY;
2900 return_error_param = -EPROTO;
2901 return_error_line = __LINE__;
2902 in_reply_to = NULL;
2903 goto err_bad_call_stack;
2904 }
2905 thread->transaction_stack = in_reply_to->to_parent;
2906 binder_inner_proc_unlock(proc);
2907 binder_set_nice(in_reply_to->saved_priority);
2908 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2909 if (target_thread == NULL) {
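			/* annotation for sparse */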
2911 __release(&target_thread->proc->inner_lock);
2912 return_error = BR_DEAD_REPLY;
2913 return_error_line = __LINE__;
2914 goto err_dead_binder;
2915 }
2916 if (target_thread->transaction_stack != in_reply_to) {
2917 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2918 proc->pid, thread->pid,
2919 target_thread->transaction_stack ?
2920 target_thread->transaction_stack->debug_id : 0,
2921 in_reply_to->debug_id);
2922 binder_inner_proc_unlock(target_thread->proc);
2923 return_error = BR_FAILED_REPLY;
2924 return_error_param = -EPROTO;
2925 return_error_line = __LINE__;
2926 in_reply_to = NULL;
2927 target_thread = NULL;
2928 goto err_dead_binder;
2929 }
2930 target_proc = target_thread->proc;
2931 target_proc->tmp_ref++;
2932 binder_inner_proc_unlock(target_thread->proc);
2933 } else {
2934 if (tr->target.handle) {
2935 struct binder_ref *ref;
2936
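			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */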
2944 binder_proc_lock(proc);
2945 ref = binder_get_ref_olocked(proc, tr->target.handle,
2946 true);
2947 if (ref) {
2948 target_node = binder_get_node_refs_for_txn(
2949 ref->node, &target_proc,
2950 &return_error);
2951 } else {
2952 binder_user_error("%d:%d got transaction to invalid handle\n",
2953 proc->pid, thread->pid);
2954 return_error = BR_FAILED_REPLY;
2955 }
2956 binder_proc_unlock(proc);
2957 } else {
2958 mutex_lock(&context->context_mgr_node_lock);
2959 target_node = context->binder_context_mgr_node;
2960 if (target_node)
2961 target_node = binder_get_node_refs_for_txn(
2962 target_node, &target_proc,
2963 &return_error);
2964 else
2965 return_error = BR_DEAD_REPLY;
2966 mutex_unlock(&context->context_mgr_node_lock);
2967 if (target_node && target_proc->pid == proc->pid) {
2968 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2969 proc->pid, thread->pid);
2970 return_error = BR_FAILED_REPLY;
2971 return_error_param = -EINVAL;
2972 return_error_line = __LINE__;
2973 goto err_invalid_target_handle;
2974 }
2975 }
2976 if (!target_node) {
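			/*
			 * return_error is set above
			 */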
2980 return_error_param = -EINVAL;
2981 return_error_line = __LINE__;
2982 goto err_dead_binder;
2983 }
2984 e->to_node = target_node->debug_id;
2985 if (security_binder_transaction(proc->tsk,
2986 target_proc->tsk) < 0) {
2987 return_error = BR_FAILED_REPLY;
2988 return_error_param = -EPERM;
2989 return_error_line = __LINE__;
2990 goto err_invalid_target_handle;
2991 }
2992 binder_inner_proc_lock(proc);
2993
2994 w = list_first_entry_or_null(&thread->todo,
2995 struct binder_work, entry);
2996 if (!(tr->flags & TF_ONE_WAY) && w &&
2997 w->type == BINDER_WORK_TRANSACTION) {
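			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */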
3007 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3008 proc->pid, thread->pid);
3009 binder_inner_proc_unlock(proc);
3010 return_error = BR_FAILED_REPLY;
3011 return_error_param = -EPROTO;
3012 return_error_line = __LINE__;
3013 goto err_bad_todo_list;
3014 }
3015
3016 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3017 struct binder_transaction *tmp;
3018
3019 tmp = thread->transaction_stack;
3020 if (tmp->to_thread != thread) {
3021 spin_lock(&tmp->lock);
3022 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3023 proc->pid, thread->pid, tmp->debug_id,
3024 tmp->to_proc ? tmp->to_proc->pid : 0,
3025 tmp->to_thread ?
3026 tmp->to_thread->pid : 0);
3027 spin_unlock(&tmp->lock);
3028 binder_inner_proc_unlock(proc);
3029 return_error = BR_FAILED_REPLY;
3030 return_error_param = -EPROTO;
3031 return_error_line = __LINE__;
3032 goto err_bad_call_stack;
3033 }
3034 while (tmp) {
3035 struct binder_thread *from;
3036
3037 spin_lock(&tmp->lock);
3038 from = tmp->from;
3039 if (from && from->proc == target_proc) {
3040 atomic_inc(&from->tmp_ref);
3041 target_thread = from;
3042 spin_unlock(&tmp->lock);
3043 break;
3044 }
3045 spin_unlock(&tmp->lock);
3046 tmp = tmp->from_parent;
3047 }
3048 }
3049 binder_inner_proc_unlock(proc);
3050 }
3051 if (target_thread)
3052 e->to_thread = target_thread->pid;
3053 e->to_proc = target_proc->pid;
3054
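	/* TODO: reuse incoming transaction for reply */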
3056 t = kzalloc(sizeof(*t), GFP_KERNEL);
3057 if (t == NULL) {
3058 return_error = BR_FAILED_REPLY;
3059 return_error_param = -ENOMEM;
3060 return_error_line = __LINE__;
3061 goto err_alloc_t_failed;
3062 }
3063 INIT_LIST_HEAD(&t->fd_fixups);
3064 binder_stats_created(BINDER_STAT_TRANSACTION);
3065 spin_lock_init(&t->lock);
3066
3067 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3068 if (tcomplete == NULL) {
3069 return_error = BR_FAILED_REPLY;
3070 return_error_param = -ENOMEM;
3071 return_error_line = __LINE__;
3072 goto err_alloc_tcomplete_failed;
3073 }
3074 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3075
3076 t->debug_id = t_debug_id;
3077
3078 if (reply)
3079 binder_debug(BINDER_DEBUG_TRANSACTION,
3080 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3081 proc->pid, thread->pid, t->debug_id,
3082 target_proc->pid, target_thread->pid,
3083 (u64)tr->data.ptr.buffer,
3084 (u64)tr->data.ptr.offsets,
3085 (u64)tr->data_size, (u64)tr->offsets_size,
3086 (u64)extra_buffers_size);
3087 else
3088 binder_debug(BINDER_DEBUG_TRANSACTION,
3089 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3090 proc->pid, thread->pid, t->debug_id,
3091 target_proc->pid, target_node->debug_id,
3092 (u64)tr->data.ptr.buffer,
3093 (u64)tr->data.ptr.offsets,
3094 (u64)tr->data_size, (u64)tr->offsets_size,
3095 (u64)extra_buffers_size);
3096
3097 if (!reply && !(tr->flags & TF_ONE_WAY))
3098 t->from = thread;
3099 else
3100 t->from = NULL;
3101 t->sender_euid = task_euid(proc->tsk);
3102 t->to_proc = target_proc;
3103 t->to_thread = target_thread;
3104 t->code = tr->code;
3105 t->flags = tr->flags;
3106 t->priority = task_nice(current);
3107
3108 if (target_node && target_node->txn_security_ctx) {
3109 u32 secid;
3110 size_t added_size;
3111
3112 security_task_getsecid(proc->tsk, &secid);
3113 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3114 if (ret) {
3115 return_error = BR_FAILED_REPLY;
3116 return_error_param = ret;
3117 return_error_line = __LINE__;
3118 goto err_get_secctx_failed;
3119 }
3120 added_size = ALIGN(secctx_sz, sizeof(u64));
3121 extra_buffers_size += added_size;
3122 if (extra_buffers_size < added_size) {
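			/* integer overflow of extra_buffers_size */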
3124 return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
3126 return_error_line = __LINE__;
3127 goto err_bad_extra_size;
3128 }
3129 }
3130
3131 trace_binder_transaction(reply, t, target_node);
3132
3133 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3134 tr->offsets_size, extra_buffers_size,
3135 !reply && (t->flags & TF_ONE_WAY));
3136 if (IS_ERR(t->buffer)) {
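		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */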
3140 return_error_param = PTR_ERR(t->buffer);
3141 return_error = return_error_param == -ESRCH ?
3142 BR_DEAD_REPLY : BR_FAILED_REPLY;
3143 return_error_line = __LINE__;
3144 t->buffer = NULL;
3145 goto err_binder_alloc_buf_failed;
3146 }
3147 if (secctx) {
3148 int err;
3149 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3150 ALIGN(tr->offsets_size, sizeof(void *)) +
3151 ALIGN(extra_buffers_size, sizeof(void *)) -
3152 ALIGN(secctx_sz, sizeof(u64));
3153
3154 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3155 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3156 t->buffer, buf_offset,
3157 secctx, secctx_sz);
3158 if (err) {
3159 t->security_ctx = 0;
3160 WARN_ON(1);
3161 }
3162 security_release_secctx(secctx, secctx_sz);
3163 secctx = NULL;
3164 }
3165 t->buffer->debug_id = t->debug_id;
3166 t->buffer->transaction = t;
3167 t->buffer->target_node = target_node;
3168 trace_binder_transaction_alloc_buf(t->buffer);
3169
3170 if (binder_alloc_copy_user_to_buffer(
3171 &target_proc->alloc,
3172 t->buffer, 0,
3173 (const void __user *)
3174 (uintptr_t)tr->data.ptr.buffer,
3175 tr->data_size)) {
3176 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3177 proc->pid, thread->pid);
3178 return_error = BR_FAILED_REPLY;
3179 return_error_param = -EFAULT;
3180 return_error_line = __LINE__;
3181 goto err_copy_data_failed;
3182 }
3183 if (binder_alloc_copy_user_to_buffer(
3184 &target_proc->alloc,
3185 t->buffer,
3186 ALIGN(tr->data_size, sizeof(void *)),
3187 (const void __user *)
3188 (uintptr_t)tr->data.ptr.offsets,
3189 tr->offsets_size)) {
3190 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3191 proc->pid, thread->pid);
3192 return_error = BR_FAILED_REPLY;
3193 return_error_param = -EFAULT;
3194 return_error_line = __LINE__;
3195 goto err_copy_data_failed;
3196 }
3197 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3198 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3199 proc->pid, thread->pid, (u64)tr->offsets_size);
3200 return_error = BR_FAILED_REPLY;
3201 return_error_param = -EINVAL;
3202 return_error_line = __LINE__;
3203 goto err_bad_offset;
3204 }
3205 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3206 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3207 proc->pid, thread->pid,
3208 (u64)extra_buffers_size);
3209 return_error = BR_FAILED_REPLY;
3210 return_error_param = -EINVAL;
3211 return_error_line = __LINE__;
3212 goto err_bad_offset;
3213 }
3214 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3215 buffer_offset = off_start_offset;
3216 off_end_offset = off_start_offset + tr->offsets_size;
3217 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3218 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3219 ALIGN(secctx_sz, sizeof(u64));
3220 off_min = 0;
3221 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3222 buffer_offset += sizeof(binder_size_t)) {
3223 struct binder_object_header *hdr;
3224 size_t object_size;
3225 struct binder_object object;
3226 binder_size_t object_offset;
3227
3228 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3229 &object_offset,
3230 t->buffer,
3231 buffer_offset,
3232 sizeof(object_offset))) {
3233 return_error = BR_FAILED_REPLY;
3234 return_error_param = -EINVAL;
3235 return_error_line = __LINE__;
3236 goto err_bad_offset;
3237 }
3238 object_size = binder_get_object(target_proc, t->buffer,
3239 object_offset, &object);
3240 if (object_size == 0 || object_offset < off_min) {
3241 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3242 proc->pid, thread->pid,
3243 (u64)object_offset,
3244 (u64)off_min,
3245 (u64)t->buffer->data_size);
3246 return_error = BR_FAILED_REPLY;
3247 return_error_param = -EINVAL;
3248 return_error_line = __LINE__;
3249 goto err_bad_offset;
3250 }
3251
3252 hdr = &object.hdr;
3253 off_min = object_offset + object_size;
3254 switch (hdr->type) {
3255 case BINDER_TYPE_BINDER:
3256 case BINDER_TYPE_WEAK_BINDER: {
3257 struct flat_binder_object *fp;
3258
3259 fp = to_flat_binder_object(hdr);
3260 ret = binder_translate_binder(fp, t, thread);
3261
3262 if (ret < 0 ||
3263 binder_alloc_copy_to_buffer(&target_proc->alloc,
3264 t->buffer,
3265 object_offset,
3266 fp, sizeof(*fp))) {
3267 return_error = BR_FAILED_REPLY;
3268 return_error_param = ret;
3269 return_error_line = __LINE__;
3270 goto err_translate_failed;
3271 }
3272 } break;
3273 case BINDER_TYPE_HANDLE:
3274 case BINDER_TYPE_WEAK_HANDLE: {
3275 struct flat_binder_object *fp;
3276
3277 fp = to_flat_binder_object(hdr);
3278 ret = binder_translate_handle(fp, t, thread);
3279 if (ret < 0 ||
3280 binder_alloc_copy_to_buffer(&target_proc->alloc,
3281 t->buffer,
3282 object_offset,
3283 fp, sizeof(*fp))) {
3284 return_error = BR_FAILED_REPLY;
3285 return_error_param = ret;
3286 return_error_line = __LINE__;
3287 goto err_translate_failed;
3288 }
3289 } break;
3290
3291 case BINDER_TYPE_FD: {
3292 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3293 binder_size_t fd_offset = object_offset +
3294 (uintptr_t)&fp->fd - (uintptr_t)fp;
3295 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3296 thread, in_reply_to);
3297
3298 fp->pad_binder = 0;
3299 if (ret < 0 ||
3300 binder_alloc_copy_to_buffer(&target_proc->alloc,
3301 t->buffer,
3302 object_offset,
3303 fp, sizeof(*fp))) {
3304 return_error = BR_FAILED_REPLY;
3305 return_error_param = ret;
3306 return_error_line = __LINE__;
3307 goto err_translate_failed;
3308 }
3309 } break;
3310 case BINDER_TYPE_FDA: {
3311 struct binder_object ptr_object;
3312 binder_size_t parent_offset;
3313 struct binder_fd_array_object *fda =
3314 to_binder_fd_array_object(hdr);
3315 size_t num_valid = (buffer_offset - off_start_offset) /
3316 sizeof(binder_size_t);
3317 struct binder_buffer_object *parent =
3318 binder_validate_ptr(target_proc, t->buffer,
3319 &ptr_object, fda->parent,
3320 off_start_offset,
3321 &parent_offset,
3322 num_valid);
3323 if (!parent) {
3324 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3325 proc->pid, thread->pid);
3326 return_error = BR_FAILED_REPLY;
3327 return_error_param = -EINVAL;
3328 return_error_line = __LINE__;
3329 goto err_bad_parent;
3330 }
3331 if (!binder_validate_fixup(target_proc, t->buffer,
3332 off_start_offset,
3333 parent_offset,
3334 fda->parent_offset,
3335 last_fixup_obj_off,
3336 last_fixup_min_off)) {
3337 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3338 proc->pid, thread->pid);
3339 return_error = BR_FAILED_REPLY;
3340 return_error_param = -EINVAL;
3341 return_error_line = __LINE__;
3342 goto err_bad_parent;
3343 }
3344 ret = binder_translate_fd_array(fda, parent, t, thread,
3345 in_reply_to);
3346 if (ret < 0) {
3347 return_error = BR_FAILED_REPLY;
3348 return_error_param = ret;
3349 return_error_line = __LINE__;
3350 goto err_translate_failed;
3351 }
3352 last_fixup_obj_off = parent_offset;
3353 last_fixup_min_off =
3354 fda->parent_offset + sizeof(u32) * fda->num_fds;
3355 } break;
3356 case BINDER_TYPE_PTR: {
3357 struct binder_buffer_object *bp =
3358 to_binder_buffer_object(hdr);
3359 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3360 size_t num_valid;
3361
3362 if (bp->length > buf_left) {
3363 binder_user_error("%d:%d got transaction with too large buffer\n",
3364 proc->pid, thread->pid);
3365 return_error = BR_FAILED_REPLY;
3366 return_error_param = -EINVAL;
3367 return_error_line = __LINE__;
3368 goto err_bad_offset;
3369 }
3370 if (binder_alloc_copy_user_to_buffer(
3371 &target_proc->alloc,
3372 t->buffer,
3373 sg_buf_offset,
3374 (const void __user *)
3375 (uintptr_t)bp->buffer,
3376 bp->length)) {
3377 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3378 proc->pid, thread->pid);
3379 return_error_param = -EFAULT;
3380 return_error = BR_FAILED_REPLY;
3381 return_error_line = __LINE__;
3382 goto err_copy_data_failed;
3383 }
3384
3385 bp->buffer = (uintptr_t)
3386 t->buffer->user_data + sg_buf_offset;
3387 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3388
3389 num_valid = (buffer_offset - off_start_offset) /
3390 sizeof(binder_size_t);
3391 ret = binder_fixup_parent(t, thread, bp,
3392 off_start_offset,
3393 num_valid,
3394 last_fixup_obj_off,
3395 last_fixup_min_off);
3396 if (ret < 0 ||
3397 binder_alloc_copy_to_buffer(&target_proc->alloc,
3398 t->buffer,
3399 object_offset,
3400 bp, sizeof(*bp))) {
3401 return_error = BR_FAILED_REPLY;
3402 return_error_param = ret;
3403 return_error_line = __LINE__;
3404 goto err_translate_failed;
3405 }
3406 last_fixup_obj_off = object_offset;
3407 last_fixup_min_off = 0;
3408 } break;
3409 default:
3410 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3411 proc->pid, thread->pid, hdr->type);
3412 return_error = BR_FAILED_REPLY;
3413 return_error_param = -EINVAL;
3414 return_error_line = __LINE__;
3415 goto err_bad_object_type;
3416 }
3417 }
3418 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3419 t->work.type = BINDER_WORK_TRANSACTION;
3420
3421 if (reply) {
3422 binder_enqueue_thread_work(thread, tcomplete);
3423 binder_inner_proc_lock(target_proc);
3424 if (target_thread->is_dead) {
3425 binder_inner_proc_unlock(target_proc);
3426 goto err_dead_proc_or_thread;
3427 }
3428 BUG_ON(t->buffer->async_transaction != 0);
3429 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3430 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3431 binder_inner_proc_unlock(target_proc);
3432 wake_up_interruptible_sync(&target_thread->wait);
3433 binder_free_transaction(in_reply_to);
3434 } else if (!(t->flags & TF_ONE_WAY)) {
3435 BUG_ON(t->buffer->async_transaction != 0);
3436 binder_inner_proc_lock(proc);
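		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */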
3444 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3445 t->need_reply = 1;
3446 t->from_parent = thread->transaction_stack;
3447 thread->transaction_stack = t;
3448 binder_inner_proc_unlock(proc);
3449 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3450 binder_inner_proc_lock(proc);
3451 binder_pop_transaction_ilocked(thread, t);
3452 binder_inner_proc_unlock(proc);
3453 goto err_dead_proc_or_thread;
3454 }
3455 } else {
3456 BUG_ON(target_node == NULL);
3457 BUG_ON(t->buffer->async_transaction != 1);
3458 binder_enqueue_thread_work(thread, tcomplete);
3459 if (!binder_proc_transaction(t, target_proc, NULL))
3460 goto err_dead_proc_or_thread;
3461 }
3462 if (target_thread)
3463 binder_thread_dec_tmpref(target_thread);
3464 binder_proc_dec_tmpref(target_proc);
3465 if (target_node)
3466 binder_dec_node_tmpref(target_node);
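	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */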
3471 smp_wmb();
3472 WRITE_ONCE(e->debug_id_done, t_debug_id);
3473 return;
3474
3475err_dead_proc_or_thread:
3476 return_error = BR_DEAD_REPLY;
3477 return_error_line = __LINE__;
3478 binder_dequeue_work(proc, tcomplete);
3479err_translate_failed:
3480err_bad_object_type:
3481err_bad_offset:
3482err_bad_parent:
3483err_copy_data_failed:
3484 binder_free_txn_fixups(t);
3485 trace_binder_transaction_failed_buffer_release(t->buffer);
3486 binder_transaction_buffer_release(target_proc, t->buffer,
3487 buffer_offset, true);
3488 if (target_node)
3489 binder_dec_node_tmpref(target_node);
3490 target_node = NULL;
3491 t->buffer->transaction = NULL;
3492 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3493err_binder_alloc_buf_failed:
3494err_bad_extra_size:
3495 if (secctx)
3496 security_release_secctx(secctx, secctx_sz);
3497err_get_secctx_failed:
3498 kfree(tcomplete);
3499 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3500err_alloc_tcomplete_failed:
3501 kfree(t);
3502 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3503err_alloc_t_failed:
3504err_bad_todo_list:
3505err_bad_call_stack:
3506err_empty_call_stack:
3507err_dead_binder:
3508err_invalid_target_handle:
3509 if (target_thread)
3510 binder_thread_dec_tmpref(target_thread);
3511 if (target_proc)
3512 binder_proc_dec_tmpref(target_proc);
3513 if (target_node) {
3514 binder_dec_node(target_node, 1, 0);
3515 binder_dec_node_tmpref(target_node);
3516 }
3517
3518 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3519 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3520 proc->pid, thread->pid, return_error, return_error_param,
3521 (u64)tr->data_size, (u64)tr->offsets_size,
3522 return_error_line);
3523
3524 {
3525 struct binder_transaction_log_entry *fe;
3526
3527 e->return_error = return_error;
3528 e->return_error_param = return_error_param;
3529 e->return_error_line = return_error_line;
3530 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3531 *fe = *e;
3532
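		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */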
3536 smp_wmb();
3537 WRITE_ONCE(e->debug_id_done, t_debug_id);
3538 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3539 }
3540
3541 BUG_ON(thread->return_error.cmd != BR_OK);
3542 if (in_reply_to) {
3543 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3544 binder_enqueue_thread_work(thread, &thread->return_error.work);
3545 binder_send_failed_reply(in_reply_to, return_error);
3546 } else {
3547 thread->return_error.cmd = return_error;
3548 binder_enqueue_thread_work(thread, &thread->return_error.work);
3549 }
3550}
3551
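/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @buffer:	buffer to be freed
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup the buffer and free it.
 */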
3562static void
3563binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3564{
3565 binder_inner_proc_lock(proc);
3566 if (buffer->transaction) {
3567 buffer->transaction->buffer = NULL;
3568 buffer->transaction = NULL;
3569 }
3570 binder_inner_proc_unlock(proc);
3571 if (buffer->async_transaction && buffer->target_node) {
3572 struct binder_node *buf_node;
3573 struct binder_work *w;
3574
3575 buf_node = buffer->target_node;
3576 binder_node_inner_lock(buf_node);
3577 BUG_ON(!buf_node->has_async_transaction);
3578 BUG_ON(buf_node->proc != proc);
3579 w = binder_dequeue_work_head_ilocked(
3580 &buf_node->async_todo);
3581 if (!w) {
3582 buf_node->has_async_transaction = false;
3583 } else {
3584 binder_enqueue_work_ilocked(
3585 w, &proc->todo);
3586 binder_wakeup_proc_ilocked(proc);
3587 }
3588 binder_node_inner_unlock(buf_node);
3589 }
3590 trace_binder_transaction_buffer_release(buffer);
3591 binder_transaction_buffer_release(proc, buffer, 0, false);
3592 binder_alloc_free_buf(&proc->alloc, buffer);
3593}
3594
3595static int binder_thread_write(struct binder_proc *proc,
3596 struct binder_thread *thread,
3597 binder_uintptr_t binder_buffer, size_t size,
3598 binder_size_t *consumed)
3599{
3600 uint32_t cmd;
3601 struct binder_context *context = proc->context;
3602 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3603 void __user *ptr = buffer + *consumed;
3604 void __user *end = buffer + size;
3605
3606 while (ptr < end && thread->return_error.cmd == BR_OK) {
3607 int ret;
3608
3609 if (get_user(cmd, (uint32_t __user *)ptr))
3610 return -EFAULT;
3611 ptr += sizeof(uint32_t);
3612 trace_binder_command(cmd);
3613 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3614 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3615 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3616 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3617 }
3618 switch (cmd) {
3619 case BC_INCREFS:
3620 case BC_ACQUIRE:
3621 case BC_RELEASE:
3622 case BC_DECREFS: {
3623 uint32_t target;
3624 const char *debug_string;
3625 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3626 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3627 struct binder_ref_data rdata;
3628
3629 if (get_user(target, (uint32_t __user *)ptr))
3630 return -EFAULT;
3631
3632 ptr += sizeof(uint32_t);
3633 ret = -1;
3634 if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
3637 ctx_mgr_node = context->binder_context_mgr_node;
3638 if (ctx_mgr_node)
3639 ret = binder_inc_ref_for_node(
3640 proc, ctx_mgr_node,
3641 strong, NULL, &rdata);
3642 mutex_unlock(&context->context_mgr_node_lock);
3643 }
3644 if (ret)
3645 ret = binder_update_ref_for_handle(
3646 proc, target, increment, strong,
3647 &rdata);
3648 if (!ret && rdata.desc != target) {
3649 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3650 proc->pid, thread->pid,
3651 target, rdata.desc);
3652 }
3653 switch (cmd) {
3654 case BC_INCREFS:
3655 debug_string = "IncRefs";
3656 break;
3657 case BC_ACQUIRE:
3658 debug_string = "Acquire";
3659 break;
3660 case BC_RELEASE:
3661 debug_string = "Release";
3662 break;
3663 case BC_DECREFS:
3664 default:
3665 debug_string = "DecRefs";
3666 break;
3667 }
3668 if (ret) {
3669 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3670 proc->pid, thread->pid, debug_string,
3671 strong, target, ret);
3672 break;
3673 }
3674 binder_debug(BINDER_DEBUG_USER_REFS,
3675 "%d:%d %s ref %d desc %d s %d w %d\n",
3676 proc->pid, thread->pid, debug_string,
3677 rdata.debug_id, rdata.desc, rdata.strong,
3678 rdata.weak);
3679 break;
3680 }
3681 case BC_INCREFS_DONE:
3682 case BC_ACQUIRE_DONE: {
3683 binder_uintptr_t node_ptr;
3684 binder_uintptr_t cookie;
3685 struct binder_node *node;
3686 bool free_node;
3687
3688 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3689 return -EFAULT;
3690 ptr += sizeof(binder_uintptr_t);
3691 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3692 return -EFAULT;
3693 ptr += sizeof(binder_uintptr_t);
3694 node = binder_get_node(proc, node_ptr);
3695 if (node == NULL) {
3696 binder_user_error("%d:%d %s u%016llx no match\n",
3697 proc->pid, thread->pid,
3698 cmd == BC_INCREFS_DONE ?
3699 "BC_INCREFS_DONE" :
3700 "BC_ACQUIRE_DONE",
3701 (u64)node_ptr);
3702 break;
3703 }
3704 if (cookie != node->cookie) {
3705 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3706 proc->pid, thread->pid,
3707 cmd == BC_INCREFS_DONE ?
3708 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3709 (u64)node_ptr, node->debug_id,
3710 (u64)cookie, (u64)node->cookie);
3711 binder_put_node(node);
3712 break;
3713 }
3714 binder_node_inner_lock(node);
3715 if (cmd == BC_ACQUIRE_DONE) {
3716 if (node->pending_strong_ref == 0) {
3717 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3718 proc->pid, thread->pid,
3719 node->debug_id);
3720 binder_node_inner_unlock(node);
3721 binder_put_node(node);
3722 break;
3723 }
3724 node->pending_strong_ref = 0;
3725 } else {
3726 if (node->pending_weak_ref == 0) {
3727 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3728 proc->pid, thread->pid,
3729 node->debug_id);
3730 binder_node_inner_unlock(node);
3731 binder_put_node(node);
3732 break;
3733 }
3734 node->pending_weak_ref = 0;
3735 }
3736 free_node = binder_dec_node_nilocked(node,
3737 cmd == BC_ACQUIRE_DONE, 0);
3738 WARN_ON(free_node);
3739 binder_debug(BINDER_DEBUG_USER_REFS,
3740 "%d:%d %s node %d ls %d lw %d tr %d\n",
3741 proc->pid, thread->pid,
3742 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3743 node->debug_id, node->local_strong_refs,
3744 node->local_weak_refs, node->tmp_refs);
3745 binder_node_inner_unlock(node);
3746 binder_put_node(node);
3747 break;
3748 }
3749 case BC_ATTEMPT_ACQUIRE:
3750 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3751 return -EINVAL;
3752 case BC_ACQUIRE_RESULT:
3753 pr_err("BC_ACQUIRE_RESULT not supported\n");
3754 return -EINVAL;
3755
3756 case BC_FREE_BUFFER: {
3757 binder_uintptr_t data_ptr;
3758 struct binder_buffer *buffer;
3759
3760 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3761 return -EFAULT;
3762 ptr += sizeof(binder_uintptr_t);
3763
3764 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3765 data_ptr);
3766 if (IS_ERR_OR_NULL(buffer)) {
3767 if (PTR_ERR(buffer) == -EPERM) {
3768 binder_user_error(
3769 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3770 proc->pid, thread->pid,
3771 (u64)data_ptr);
3772 } else {
3773 binder_user_error(
3774 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3775 proc->pid, thread->pid,
3776 (u64)data_ptr);
3777 }
3778 break;
3779 }
3780 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3781 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3782 proc->pid, thread->pid, (u64)data_ptr,
3783 buffer->debug_id,
3784 buffer->transaction ? "active" : "finished");
3785 binder_free_buf(proc, buffer);
3786 break;
3787 }
3788
3789 case BC_TRANSACTION_SG:
3790 case BC_REPLY_SG: {
3791 struct binder_transaction_data_sg tr;
3792
3793 if (copy_from_user(&tr, ptr, sizeof(tr)))
3794 return -EFAULT;
3795 ptr += sizeof(tr);
3796 binder_transaction(proc, thread, &tr.transaction_data,
3797 cmd == BC_REPLY_SG, tr.buffers_size);
3798 break;
3799 }
3800 case BC_TRANSACTION:
3801 case BC_REPLY: {
3802 struct binder_transaction_data tr;
3803
3804 if (copy_from_user(&tr, ptr, sizeof(tr)))
3805 return -EFAULT;
3806 ptr += sizeof(tr);
3807 binder_transaction(proc, thread, &tr,
3808 cmd == BC_REPLY, 0);
3809 break;
3810 }
3811
3812 case BC_REGISTER_LOOPER:
3813 binder_debug(BINDER_DEBUG_THREADS,
3814 "%d:%d BC_REGISTER_LOOPER\n",
3815 proc->pid, thread->pid);
3816 binder_inner_proc_lock(proc);
3817 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3818 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3819 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3820 proc->pid, thread->pid);
3821 } else if (proc->requested_threads == 0) {
3822 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3823 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3824 proc->pid, thread->pid);
3825 } else {
3826 proc->requested_threads--;
3827 proc->requested_threads_started++;
3828 }
3829 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3830 binder_inner_proc_unlock(proc);
3831 break;
3832 case BC_ENTER_LOOPER:
3833 binder_debug(BINDER_DEBUG_THREADS,
3834 "%d:%d BC_ENTER_LOOPER\n",
3835 proc->pid, thread->pid);
3836 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3837 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3838 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3839 proc->pid, thread->pid);
3840 }
3841 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3842 break;
3843 case BC_EXIT_LOOPER:
3844 binder_debug(BINDER_DEBUG_THREADS,
3845 "%d:%d BC_EXIT_LOOPER\n",
3846 proc->pid, thread->pid);
3847 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3848 break;
3849
3850 case BC_REQUEST_DEATH_NOTIFICATION:
3851 case BC_CLEAR_DEATH_NOTIFICATION: {
3852 uint32_t target;
3853 binder_uintptr_t cookie;
3854 struct binder_ref *ref;
3855 struct binder_ref_death *death = NULL;
3856
3857 if (get_user(target, (uint32_t __user *)ptr))
3858 return -EFAULT;
3859 ptr += sizeof(uint32_t);
3860 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3861 return -EFAULT;
3862 ptr += sizeof(binder_uintptr_t);
3863 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
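				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */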
3868 death = kzalloc(sizeof(*death), GFP_KERNEL);
3869 if (death == NULL) {
3870 WARN_ON(thread->return_error.cmd !=
3871 BR_OK);
3872 thread->return_error.cmd = BR_ERROR;
3873 binder_enqueue_thread_work(
3874 thread,
3875 &thread->return_error.work);
3876 binder_debug(
3877 BINDER_DEBUG_FAILED_TRANSACTION,
3878 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3879 proc->pid, thread->pid);
3880 break;
3881 }
3882 }
3883 binder_proc_lock(proc);
3884 ref = binder_get_ref_olocked(proc, target, false);
3885 if (ref == NULL) {
3886 binder_user_error("%d:%d %s invalid ref %d\n",
3887 proc->pid, thread->pid,
3888 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3889 "BC_REQUEST_DEATH_NOTIFICATION" :
3890 "BC_CLEAR_DEATH_NOTIFICATION",
3891 target);
3892 binder_proc_unlock(proc);
3893 kfree(death);
3894 break;
3895 }
3896
3897 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3898 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3899 proc->pid, thread->pid,
3900 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3901 "BC_REQUEST_DEATH_NOTIFICATION" :
3902 "BC_CLEAR_DEATH_NOTIFICATION",
3903 (u64)cookie, ref->data.debug_id,
3904 ref->data.desc, ref->data.strong,
3905 ref->data.weak, ref->node->debug_id);
3906
3907 binder_node_lock(ref->node);
3908 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3909 if (ref->death) {
3910 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3911 proc->pid, thread->pid);
3912 binder_node_unlock(ref->node);
3913 binder_proc_unlock(proc);
3914 kfree(death);
3915 break;
3916 }
3917 binder_stats_created(BINDER_STAT_DEATH);
3918 INIT_LIST_HEAD(&death->work.entry);
3919 death->cookie = cookie;
3920 ref->death = death;
3921 if (ref->node->proc == NULL) {
3922 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3923
3924 binder_inner_proc_lock(proc);
3925 binder_enqueue_work_ilocked(
3926 &ref->death->work, &proc->todo);
3927 binder_wakeup_proc_ilocked(proc);
3928 binder_inner_proc_unlock(proc);
3929 }
3930 } else {
3931 if (ref->death == NULL) {
3932 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3933 proc->pid, thread->pid);
3934 binder_node_unlock(ref->node);
3935 binder_proc_unlock(proc);
3936 break;
3937 }
3938 death = ref->death;
3939 if (death->cookie != cookie) {
3940 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3941 proc->pid, thread->pid,
3942 (u64)death->cookie,
3943 (u64)cookie);
3944 binder_node_unlock(ref->node);
3945 binder_proc_unlock(proc);
3946 break;
3947 }
3948 ref->death = NULL;
3949 binder_inner_proc_lock(proc);
3950 if (list_empty(&death->work.entry)) {
3951 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3952 if (thread->looper &
3953 (BINDER_LOOPER_STATE_REGISTERED |
3954 BINDER_LOOPER_STATE_ENTERED))
3955 binder_enqueue_thread_work_ilocked(
3956 thread,
3957 &death->work);
3958 else {
3959 binder_enqueue_work_ilocked(
3960 &death->work,
3961 &proc->todo);
3962 binder_wakeup_proc_ilocked(
3963 proc);
3964 }
3965 } else {
3966 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3967 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3968 }
3969 binder_inner_proc_unlock(proc);
3970 }
3971 binder_node_unlock(ref->node);
3972 binder_proc_unlock(proc);
3973 } break;
3974 case BC_DEAD_BINDER_DONE: {
3975 struct binder_work *w;
3976 binder_uintptr_t cookie;
3977 struct binder_ref_death *death = NULL;
3978
3979 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3980 return -EFAULT;
3981
3982 ptr += sizeof(cookie);
3983 binder_inner_proc_lock(proc);
3984 list_for_each_entry(w, &proc->delivered_death,
3985 entry) {
3986 struct binder_ref_death *tmp_death =
3987 container_of(w,
3988 struct binder_ref_death,
3989 work);
3990
3991 if (tmp_death->cookie == cookie) {
3992 death = tmp_death;
3993 break;
3994 }
3995 }
3996 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3997 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3998 proc->pid, thread->pid, (u64)cookie,
3999 death);
4000 if (death == NULL) {
4001 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4002 proc->pid, thread->pid, (u64)cookie);
4003 binder_inner_proc_unlock(proc);
4004 break;
4005 }
4006 binder_dequeue_work_ilocked(&death->work);
4007 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4008 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4009 if (thread->looper &
4010 (BINDER_LOOPER_STATE_REGISTERED |
4011 BINDER_LOOPER_STATE_ENTERED))
4012 binder_enqueue_thread_work_ilocked(
4013 thread, &death->work);
4014 else {
4015 binder_enqueue_work_ilocked(
4016 &death->work,
4017 &proc->todo);
4018 binder_wakeup_proc_ilocked(proc);
4019 }
4020 }
4021 binder_inner_proc_unlock(proc);
4022 } break;
4023
4024 default:
4025 pr_err("%d:%d unknown command %d\n",
4026 proc->pid, thread->pid, cmd);
4027 return -EINVAL;
4028 }
4029 *consumed = ptr - buffer;
4030 }
4031 return 0;
4032}
4033
4034static void binder_stat_br(struct binder_proc *proc,
4035 struct binder_thread *thread, uint32_t cmd)
4036{
4037 trace_binder_return(cmd);
4038 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4039 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4040 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4041 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4042 }
4043}
4044
4045static int binder_put_node_cmd(struct binder_proc *proc,
4046 struct binder_thread *thread,
4047 void __user **ptrp,
4048 binder_uintptr_t node_ptr,
4049 binder_uintptr_t node_cookie,
4050 int node_debug_id,
4051 uint32_t cmd, const char *cmd_name)
4052{
4053 void __user *ptr = *ptrp;
4054
4055 if (put_user(cmd, (uint32_t __user *)ptr))
4056 return -EFAULT;
4057 ptr += sizeof(uint32_t);
4058
4059 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4060 return -EFAULT;
4061 ptr += sizeof(binder_uintptr_t);
4062
4063 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4064 return -EFAULT;
4065 ptr += sizeof(binder_uintptr_t);
4066
4067 binder_stat_br(proc, thread, cmd);
4068 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4069 proc->pid, thread->pid, cmd_name, node_debug_id,
4070 (u64)node_ptr, (u64)node_cookie);
4071
4072 *ptrp = ptr;
4073 return 0;
4074}
4075
4076static int binder_wait_for_work(struct binder_thread *thread,
4077 bool do_proc_work)
4078{
4079 DEFINE_WAIT(wait);
4080 struct binder_proc *proc = thread->proc;
4081 int ret = 0;
4082
4083 freezer_do_not_count();
4084 binder_inner_proc_lock(proc);
4085 for (;;) {
4086 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4087 if (binder_has_work_ilocked(thread, do_proc_work))
4088 break;
4089 if (do_proc_work)
4090 list_add(&thread->waiting_thread_node,
4091 &proc->waiting_threads);
4092 binder_inner_proc_unlock(proc);
4093 schedule();
4094 binder_inner_proc_lock(proc);
4095 list_del_init(&thread->waiting_thread_node);
4096 if (signal_pending(current)) {
4097 ret = -ERESTARTSYS;
4098 break;
4099 }
4100 }
4101 finish_wait(&thread->wait, &wait);
4102 binder_inner_proc_unlock(proc);
4103 freezer_count();
4104
4105 return ret;
4106}
4107
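/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and closing
 * any fds that have already been allocated.
 */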
4122static int binder_apply_fd_fixups(struct binder_proc *proc,
4123 struct binder_transaction *t)
4124{
4125 struct binder_txn_fd_fixup *fixup, *tmp;
4126 int ret = 0;
4127
4128 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4129 int fd = get_unused_fd_flags(O_CLOEXEC);
4130
4131 if (fd < 0) {
4132 binder_debug(BINDER_DEBUG_TRANSACTION,
4133 "failed fd fixup txn %d fd %d\n",
4134 t->debug_id, fd);
4135 ret = -ENOMEM;
4136 break;
4137 }
4138 binder_debug(BINDER_DEBUG_TRANSACTION,
4139 "fd fixup txn %d fd %d\n",
4140 t->debug_id, fd);
4141 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4142 fd_install(fd, fixup->file);
4143 fixup->file = NULL;
4144 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4145 fixup->offset, &fd,
4146 sizeof(u32))) {
4147 ret = -EINVAL;
4148 break;
4149 }
4150 }
4151 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4152 if (fixup->file) {
4153 fput(fixup->file);
4154 } else if (ret) {
4155 u32 fd;
4156 int err;
4157
4158 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4159 t->buffer,
4160 fixup->offset,
4161 sizeof(fd));
4162 WARN_ON(err);
4163 if (!err)
4164 binder_deferred_fd_close(fd);
4165 }
4166 list_del(&fixup->fixup_entry);
4167 kfree(fixup);
4168 }
4169
4170 return ret;
4171}
4172
4173static int binder_thread_read(struct binder_proc *proc,
4174 struct binder_thread *thread,
4175 binder_uintptr_t binder_buffer, size_t size,
4176 binder_size_t *consumed, int non_block)
4177{
4178 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4179 void __user *ptr = buffer + *consumed;
4180 void __user *end = buffer + size;
4181
4182 int ret = 0;
4183 int wait_for_proc_work;
4184
4185 if (*consumed == 0) {
4186 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4187 return -EFAULT;
4188 ptr += sizeof(uint32_t);
4189 }
4190
4191retry:
4192 binder_inner_proc_lock(proc);
4193 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4194 binder_inner_proc_unlock(proc);
4195
4196 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4197
4198 trace_binder_wait_for_work(wait_for_proc_work,
4199 !!thread->transaction_stack,
4200 !binder_worklist_empty(proc, &thread->todo));
4201 if (wait_for_proc_work) {
4202 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4203 BINDER_LOOPER_STATE_ENTERED))) {
4204 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4205 proc->pid, thread->pid, thread->looper);
4206 wait_event_interruptible(binder_user_error_wait,
4207 binder_stop_on_user_error < 2);
4208 }
4209 binder_set_nice(proc->default_priority);
4210 }
4211
4212 if (non_block) {
4213 if (!binder_has_work(thread, wait_for_proc_work))
4214 ret = -EAGAIN;
4215 } else {
4216 ret = binder_wait_for_work(thread, wait_for_proc_work);
4217 }
4218
4219 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4220
4221 if (ret)
4222 return ret;
4223
4224 while (1) {
4225 uint32_t cmd;
4226 struct binder_transaction_data_secctx tr;
4227 struct binder_transaction_data *trd = &tr.transaction_data;
4228 struct binder_work *w = NULL;
4229 struct list_head *list = NULL;
4230 struct binder_transaction *t = NULL;
4231 struct binder_thread *t_from;
4232 size_t trsize = sizeof(*trd);
4233
4234 binder_inner_proc_lock(proc);
4235 if (!binder_worklist_empty_ilocked(&thread->todo))
4236 list = &thread->todo;
4237 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4238 wait_for_proc_work)
4239 list = &proc->todo;
4240 else {
4241 binder_inner_proc_unlock(proc);
4242
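			/* no data added */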
4244 if (ptr - buffer == 4 && !thread->looper_need_return)
4245 goto retry;
4246 break;
4247 }
4248
4249 if (end - ptr < sizeof(tr) + 4) {
4250 binder_inner_proc_unlock(proc);
4251 break;
4252 }
4253 w = binder_dequeue_work_head_ilocked(list);
4254 if (binder_worklist_empty_ilocked(&thread->todo))
4255 thread->process_todo = false;
4256
4257 switch (w->type) {
4258 case BINDER_WORK_TRANSACTION: {
4259 binder_inner_proc_unlock(proc);
4260 t = container_of(w, struct binder_transaction, work);
4261 } break;
4262 case BINDER_WORK_RETURN_ERROR: {
4263 struct binder_error *e = container_of(
4264 w, struct binder_error, work);
4265
4266 WARN_ON(e->cmd == BR_OK);
4267 binder_inner_proc_unlock(proc);
4268 if (put_user(e->cmd, (uint32_t __user *)ptr))
4269 return -EFAULT;
4270 cmd = e->cmd;
4271 e->cmd = BR_OK;
4272 ptr += sizeof(uint32_t);
4273
4274 binder_stat_br(proc, thread, cmd);
4275 } break;
4276 case BINDER_WORK_TRANSACTION_COMPLETE: {
4277 binder_inner_proc_unlock(proc);
4278 cmd = BR_TRANSACTION_COMPLETE;
4279 kfree(w);
4280 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4281 if (put_user(cmd, (uint32_t __user *)ptr))
4282 return -EFAULT;
4283 ptr += sizeof(uint32_t);
4284
4285 binder_stat_br(proc, thread, cmd);
4286 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4287 "%d:%d BR_TRANSACTION_COMPLETE\n",
4288 proc->pid, thread->pid);
4289 } break;
4290 case BINDER_WORK_NODE: {
4291 struct binder_node *node = container_of(w, struct binder_node, work);
4292 int strong, weak;
4293 binder_uintptr_t node_ptr = node->ptr;
4294 binder_uintptr_t node_cookie = node->cookie;
4295 int node_debug_id = node->debug_id;
4296 int has_weak_ref;
4297 int has_strong_ref;
4298 void __user *orig_ptr = ptr;
4299
4300 BUG_ON(proc != node->proc);
4301 strong = node->internal_strong_refs ||
4302 node->local_strong_refs;
4303 weak = !hlist_empty(&node->refs) ||
4304 node->local_weak_refs ||
4305 node->tmp_refs || strong;
4306 has_strong_ref = node->has_strong_ref;
4307 has_weak_ref = node->has_weak_ref;
4308
4309 if (weak && !has_weak_ref) {
4310 node->has_weak_ref = 1;
4311 node->pending_weak_ref = 1;
4312 node->local_weak_refs++;
4313 }
4314 if (strong && !has_strong_ref) {
4315 node->has_strong_ref = 1;
4316 node->pending_strong_ref = 1;
4317 node->local_strong_refs++;
4318 }
4319 if (!strong && has_strong_ref)
4320 node->has_strong_ref = 0;
4321 if (!weak && has_weak_ref)
4322 node->has_weak_ref = 0;
4323 if (!weak && !strong) {
4324 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4325 "%d:%d node %d u%016llx c%016llx deleted\n",
4326 proc->pid, thread->pid,
4327 node_debug_id,
4328 (u64)node_ptr,
4329 (u64)node_cookie);
4330 rb_erase(&node->rb_node, &proc->nodes);
4331 binder_inner_proc_unlock(proc);
4332 binder_node_lock(node);
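				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */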
4342 binder_node_unlock(node);
4343 binder_free_node(node);
4344 } else
4345 binder_inner_proc_unlock(proc);
4346
4347 if (weak && !has_weak_ref)
4348 ret = binder_put_node_cmd(
4349 proc, thread, &ptr, node_ptr,
4350 node_cookie, node_debug_id,
4351 BR_INCREFS, "BR_INCREFS");
4352 if (!ret && strong && !has_strong_ref)
4353 ret = binder_put_node_cmd(
4354 proc, thread, &ptr, node_ptr,
4355 node_cookie, node_debug_id,
4356 BR_ACQUIRE, "BR_ACQUIRE");
4357 if (!ret && !strong && has_strong_ref)
4358 ret = binder_put_node_cmd(
4359 proc, thread, &ptr, node_ptr,
4360 node_cookie, node_debug_id,
4361 BR_RELEASE, "BR_RELEASE");
4362 if (!ret && !weak && has_weak_ref)
4363 ret = binder_put_node_cmd(
4364 proc, thread, &ptr, node_ptr,
4365 node_cookie, node_debug_id,
4366 BR_DECREFS, "BR_DECREFS");
4367 if (orig_ptr == ptr)
4368 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4369 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4370 proc->pid, thread->pid,
4371 node_debug_id,
4372 (u64)node_ptr,
4373 (u64)node_cookie);
4374 if (ret)
4375 return ret;
4376 } break;
4377 case BINDER_WORK_DEAD_BINDER:
4378 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4379 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4380 struct binder_ref_death *death;
4381 uint32_t cmd;
4382 binder_uintptr_t cookie;
4383
4384 death = container_of(w, struct binder_ref_death, work);
4385 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4386 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4387 else
4388 cmd = BR_DEAD_BINDER;
4389 cookie = death->cookie;
4390
4391 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4392 "%d:%d %s %016llx\n",
4393 proc->pid, thread->pid,
4394 cmd == BR_DEAD_BINDER ?
4395 "BR_DEAD_BINDER" :
4396 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4397 (u64)cookie);
4398 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4399 binder_inner_proc_unlock(proc);
4400 kfree(death);
4401 binder_stats_deleted(BINDER_STAT_DEATH);
4402 } else {
4403 binder_enqueue_work_ilocked(
4404 w, &proc->delivered_death);
4405 binder_inner_proc_unlock(proc);
4406 }
4407 if (put_user(cmd, (uint32_t __user *)ptr))
4408 return -EFAULT;
4409 ptr += sizeof(uint32_t);
4410 if (put_user(cookie,
4411 (binder_uintptr_t __user *)ptr))
4412 return -EFAULT;
4413 ptr += sizeof(binder_uintptr_t);
4414 binder_stat_br(proc, thread, cmd);
4415 if (cmd == BR_DEAD_BINDER)
4416 goto done;
4417 } break;
4418 default:
4419 binder_inner_proc_unlock(proc);
4420 pr_err("%d:%d: bad work type %d\n",
4421 proc->pid, thread->pid, w->type);
4422 break;
4423 }
4424
4425 if (!t)
4426 continue;
4427
4428 BUG_ON(t->buffer == NULL);
4429 if (t->buffer->target_node) {
4430 struct binder_node *target_node = t->buffer->target_node;
4431
4432 trd->target.ptr = target_node->ptr;
4433 trd->cookie = target_node->cookie;
4434 t->saved_priority = task_nice(current);
4435 if (t->priority < target_node->min_priority &&
4436 !(t->flags & TF_ONE_WAY))
4437 binder_set_nice(t->priority);
4438 else if (!(t->flags & TF_ONE_WAY) ||
4439 t->saved_priority > target_node->min_priority)
4440 binder_set_nice(target_node->min_priority);
4441 cmd = BR_TRANSACTION;
4442 } else {
4443 trd->target.ptr = 0;
4444 trd->cookie = 0;
4445 cmd = BR_REPLY;
4446 }
4447 trd->code = t->code;
4448 trd->flags = t->flags;
4449 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4450
4451 t_from = binder_get_txn_from(t);
4452 if (t_from) {
4453 struct task_struct *sender = t_from->proc->tsk;
4454
4455 trd->sender_pid =
4456 task_tgid_nr_ns(sender,
4457 task_active_pid_ns(current));
4458 } else {
4459 trd->sender_pid = 0;
4460 }
4461
4462 ret = binder_apply_fd_fixups(proc, t);
4463 if (ret) {
4464 struct binder_buffer *buffer = t->buffer;
4465 bool oneway = !!(t->flags & TF_ONE_WAY);
4466 int tid = t->debug_id;
4467
4468 if (t_from)
4469 binder_thread_dec_tmpref(t_from);
4470 buffer->transaction = NULL;
4471 binder_cleanup_transaction(t, "fd fixups failed",
4472 BR_FAILED_REPLY);
4473 binder_free_buf(proc, buffer);
4474 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4475 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4476 proc->pid, thread->pid,
4477 oneway ? "async " :
4478 (cmd == BR_REPLY ? "reply " : ""),
4479 tid, BR_FAILED_REPLY, ret, __LINE__);
4480 if (cmd == BR_REPLY) {
4481 cmd = BR_FAILED_REPLY;
4482 if (put_user(cmd, (uint32_t __user *)ptr))
4483 return -EFAULT;
4484 ptr += sizeof(uint32_t);
4485 binder_stat_br(proc, thread, cmd);
4486 break;
4487 }
4488 continue;
4489 }
4490 trd->data_size = t->buffer->data_size;
4491 trd->offsets_size = t->buffer->offsets_size;
4492 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4493 trd->data.ptr.offsets = trd->data.ptr.buffer +
4494 ALIGN(t->buffer->data_size,
4495 sizeof(void *));
4496
4497 tr.secctx = t->security_ctx;
4498 if (t->security_ctx) {
4499 cmd = BR_TRANSACTION_SEC_CTX;
4500 trsize = sizeof(tr);
4501 }
4502 if (put_user(cmd, (uint32_t __user *)ptr)) {
4503 if (t_from)
4504 binder_thread_dec_tmpref(t_from);
4505
4506 binder_cleanup_transaction(t, "put_user failed",
4507 BR_FAILED_REPLY);
4508
4509 return -EFAULT;
4510 }
4511 ptr += sizeof(uint32_t);
4512 if (copy_to_user(ptr, &tr, trsize)) {
4513 if (t_from)
4514 binder_thread_dec_tmpref(t_from);
4515
4516 binder_cleanup_transaction(t, "copy_to_user failed",
4517 BR_FAILED_REPLY);
4518
4519 return -EFAULT;
4520 }
4521 ptr += trsize;
4522
4523 trace_binder_transaction_received(t);
4524 binder_stat_br(proc, thread, cmd);
4525 binder_debug(BINDER_DEBUG_TRANSACTION,
4526 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4527 proc->pid, thread->pid,
4528 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4529 (cmd == BR_TRANSACTION_SEC_CTX) ?
4530 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4531 t->debug_id, t_from ? t_from->proc->pid : 0,
4532 t_from ? t_from->pid : 0, cmd,
4533 t->buffer->data_size, t->buffer->offsets_size,
4534 (u64)trd->data.ptr.buffer,
4535 (u64)trd->data.ptr.offsets);
4536
4537 if (t_from)
4538 binder_thread_dec_tmpref(t_from);
4539 t->buffer->allow_user_free = 1;
4540 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4541 binder_inner_proc_lock(thread->proc);
4542 t->to_parent = thread->transaction_stack;
4543 t->to_thread = thread;
4544 thread->transaction_stack = t;
4545 binder_inner_proc_unlock(thread->proc);
4546 } else {
4547 binder_free_transaction(t);
4548 }
4549 break;
4550 }
4551
4552done:
4553
4554 *consumed = ptr - buffer;
4555 binder_inner_proc_lock(proc);
4556 if (proc->requested_threads == 0 &&
4557 list_empty(&thread->proc->waiting_threads) &&
4558 proc->requested_threads_started < proc->max_threads &&
4559 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4560 BINDER_LOOPER_STATE_ENTERED))
4561 ) {
4562 proc->requested_threads++;
4563 binder_inner_proc_unlock(proc);
4564 binder_debug(BINDER_DEBUG_THREADS,
4565 "%d:%d BR_SPAWN_LOOPER\n",
4566 proc->pid, thread->pid);
4567 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4568 return -EFAULT;
4569 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4570 } else
4571 binder_inner_proc_unlock(proc);
4572 return 0;
4573}
4574
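/**
 * binder_release_work() - flush a worklist when its owner goes away
 * @proc:	binder_proc the worklist belongs to
 * @list:	list of binder_work items to drain
 *
 * Dequeues every pending item: undelivered transactions are failed
 * with BR_DEAD_REPLY, undelivered completions and death notifications
 * are freed, and pending return errors are only logged (they are
 * embedded in their thread and released with it).
 */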
4575static void binder_release_work(struct binder_proc *proc,
4576 struct list_head *list)
4577{
4578 struct binder_work *w;
4579
4580 while (1) {
4581 w = binder_dequeue_work_head(proc, list);
4582 if (!w)
4583 return;
4584
4585 switch (w->type) {
4586 case BINDER_WORK_TRANSACTION: {
4587 struct binder_transaction *t;
4588
4589 t = container_of(w, struct binder_transaction, work);
4590
4591 binder_cleanup_transaction(t, "process died.",
4592 BR_DEAD_REPLY);
4593 } break;
4594 case BINDER_WORK_RETURN_ERROR: {
4595 struct binder_error *e = container_of(
4596 w, struct binder_error, work);
4597
4598 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4599 "undelivered TRANSACTION_ERROR: %u\n",
4600 e->cmd);
4601 } break;
4602 case BINDER_WORK_TRANSACTION_COMPLETE: {
4603 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4604 "undelivered TRANSACTION_COMPLETE\n");
4605 kfree(w);
4606 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4607 } break;
4608 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4609 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4610 struct binder_ref_death *death;
4611
4612 death = container_of(w, struct binder_ref_death, work);
4613 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4614 "undelivered death notification, %016llx\n",
4615 (u64)death->cookie);
4616 kfree(death);
4617 binder_stats_deleted(BINDER_STAT_DEATH);
4618 } break;
4619 default:
4620 pr_err("unexpected work type, %d, not freed\n",
4621 w->type);
4622 break;
4623 }
4624 }
4625
4626}
4627
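/**
 * binder_get_thread_ilocked() - look up (or insert) the calling thread
 * @proc:	binder_proc whose thread rbtree is searched
 * @new_thread:	preallocated thread to insert on a miss, or NULL
 *
 * Searches proc->threads (keyed by PID) for current->pid. On a miss,
 * @new_thread, if given, is initialized and inserted. The caller must
 * hold the proc's inner lock (hence the _ilocked suffix).
 */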
4628static struct binder_thread *binder_get_thread_ilocked(
4629 struct binder_proc *proc, struct binder_thread *new_thread)
4630{
4631 struct binder_thread *thread = NULL;
4632 struct rb_node *parent = NULL;
4633 struct rb_node **p = &proc->threads.rb_node;
4634
4635 while (*p) {
4636 parent = *p;
4637 thread = rb_entry(parent, struct binder_thread, rb_node);
4638
4639 if (current->pid < thread->pid)
4640 p = &(*p)->rb_left;
4641 else if (current->pid > thread->pid)
4642 p = &(*p)->rb_right;
4643 else
4644 return thread;
4645 }
4646 if (!new_thread)
4647 return NULL;
4648 thread = new_thread;
4649 binder_stats_created(BINDER_STAT_THREAD);
4650 thread->proc = proc;
4651 thread->pid = current->pid;
4652 atomic_set(&thread->tmp_ref, 0);
4653 init_waitqueue_head(&thread->wait);
4654 INIT_LIST_HEAD(&thread->todo);
4655 rb_link_node(&thread->rb_node, parent, p);
4656 rb_insert_color(&thread->rb_node, &proc->threads);
4657 thread->looper_need_return = true;
4658 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4659 thread->return_error.cmd = BR_OK;
4660 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4661 thread->reply_error.cmd = BR_OK;
4662 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4663 return thread;
4664}
4665
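/*
 * Two-step lookup: kzalloc(GFP_KERNEL) may sleep, so it cannot be
 * called under the inner spinlock. Allocate first, redo the lookup
 * under the lock, and free the spare allocation if it went unused.
 */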
4666static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4667{
4668 struct binder_thread *thread;
4669 struct binder_thread *new_thread;
4670
4671 binder_inner_proc_lock(proc);
4672 thread = binder_get_thread_ilocked(proc, NULL);
4673 binder_inner_proc_unlock(proc);
4674 if (!thread) {
4675 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4676 if (new_thread == NULL)
4677 return NULL;
4678 binder_inner_proc_lock(proc);
4679 thread = binder_get_thread_ilocked(proc, new_thread);
4680 binder_inner_proc_unlock(proc);
4681 if (thread != new_thread)
4682 kfree(new_thread);
4683 }
4684 return thread;
4685}
4686
4687static void binder_free_proc(struct binder_proc *proc)
4688{
4689 BUG_ON(!list_empty(&proc->todo));
4690 BUG_ON(!list_empty(&proc->delivered_death));
4691 binder_alloc_deferred_release(&proc->alloc);
4692 put_task_struct(proc->tsk);
4693 binder_stats_deleted(BINDER_STAT_PROC);
4694 kfree(proc);
4695}
4696
4697static void binder_free_thread(struct binder_thread *thread)
4698{
4699 BUG_ON(!list_empty(&thread->todo));
4700 binder_stats_deleted(BINDER_STAT_THREAD);
4701 binder_proc_dec_tmpref(thread->proc);
4702 kfree(thread);
4703}
4704
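/**
 * binder_thread_release() - tear down a thread on exit or BINDER_THREAD_EXIT
 * @proc:	owning process
 * @thread:	thread being released
 *
 * Removes @thread from proc->threads, unwinds its transaction stack
 * (an in-flight reply is failed with BR_DEAD_REPLY), flushes its todo
 * list and, if the thread ever polled, wakes waiters with POLLFREE and
 * waits for RCU before the struct can be freed.
 *
 * Return: the number of transactions that were still active.
 */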
4705static int binder_thread_release(struct binder_proc *proc,
4706 struct binder_thread *thread)
4707{
4708 struct binder_transaction *t;
4709 struct binder_transaction *send_reply = NULL;
4710 int active_transactions = 0;
4711 struct binder_transaction *last_t = NULL;
4712
4713 binder_inner_proc_lock(thread->proc);
4714	/*
4715	 * take a ref on the proc so it survives
4716	 * after we remove this thread from proc->threads.
4717	 * The corresponding dec is when we actually
4718	 * remove this thread from proc->threads.
4719	 */
4720 proc->tmp_ref++;
4721	/*
4722	 * take a ref on this thread to ensure it
4723	 * survives while we are releasing it
4724	 */
4725 atomic_inc(&thread->tmp_ref);
4726 rb_erase(&thread->rb_node, &proc->threads);
4727 t = thread->transaction_stack;
4728 if (t) {
4729 spin_lock(&t->lock);
4730 if (t->to_thread == thread)
4731 send_reply = t;
4732 } else {
4733 __acquire(&t->lock);
4734 }
4735 thread->is_dead = true;
4736
4737 while (t) {
4738 last_t = t;
4739 active_transactions++;
4740 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4741 "release %d:%d transaction %d %s, still active\n",
4742 proc->pid, thread->pid,
4743 t->debug_id,
4744 (t->to_thread == thread) ? "in" : "out");
4745
4746 if (t->to_thread == thread) {
4747 t->to_proc = NULL;
4748 t->to_thread = NULL;
4749 if (t->buffer) {
4750 t->buffer->transaction = NULL;
4751 t->buffer = NULL;
4752 }
4753 t = t->to_parent;
4754 } else if (t->from == thread) {
4755 t->from = NULL;
4756 t = t->from_parent;
4757 } else
4758 BUG();
4759 spin_unlock(&last_t->lock);
4760 if (t)
4761 spin_lock(&t->lock);
4762 else
4763 __acquire(&t->lock);
4764 }
4765	/* annotation for sparse, lock not acquired in last iteration above */
4766 __release(&t->lock);
4767
4768	/*
4769	 * If this thread used poll, make sure we remove the waitqueue
4770	 * from any epoll data structures holding it with POLLFREE.
4771	 * waitqueue_active() is safe to use here because we're holding
4772	 * the inner lock.
4773	 */
4774 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4775 waitqueue_active(&thread->wait)) {
4776 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4777 }
4778
4779 binder_inner_proc_unlock(thread->proc);
4780
4781	/*
4782	 * This is needed to avoid races between wake_up_poll() above and
4783	 * ep_remove_wait_queue() called for other reasons (eg when the
4784	 * epoll file is closed): ep_remove_wait_queue() runs under an RCU
4785	 * read lock, so it is guaranteed done once synchronize_rcu() returns.
4786	 */
4787 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4788 synchronize_rcu();
4789
4790 if (send_reply)
4791 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4792 binder_release_work(proc, &thread->todo);
4793 binder_thread_dec_tmpref(thread);
4794 return active_transactions;
4795}
4796
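/*
 * poll() support. Whether process-wide work counts as "readable" for
 * this thread depends on binder_available_for_proc_work_ilocked(): a
 * thread with a transaction stack or private todo work only waits on
 * its own queue.
 */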
4797static __poll_t binder_poll(struct file *filp,
4798 struct poll_table_struct *wait)
4799{
4800 struct binder_proc *proc = filp->private_data;
4801 struct binder_thread *thread = NULL;
4802 bool wait_for_proc_work;
4803
4804 thread = binder_get_thread(proc);
4805 if (!thread)
4806		return EPOLLERR;
4807
4808 binder_inner_proc_lock(thread->proc);
4809 thread->looper |= BINDER_LOOPER_STATE_POLL;
4810 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4811
4812 binder_inner_proc_unlock(thread->proc);
4813
4814 poll_wait(filp, &thread->wait, wait);
4815
4816 if (binder_has_work(thread, wait_for_proc_work))
4817 return EPOLLIN;
4818
4819 return 0;
4820}
4821
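/**
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ
 * @filp:	binder file
 * @cmd:	ioctl command, used only to validate the argument size
 * @arg:	user pointer to a struct binder_write_read
 * @thread:	calling binder thread
 *
 * Consumes the write buffer (BC_* commands) first, then fills the read
 * buffer (BR_* returns), updating the *_consumed fields as it goes.
 * Roughly, the userspace side looks like this (illustrative only;
 * buffer setup and error handling omitted, names are made up):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_cmds,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */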
4822static int binder_ioctl_write_read(struct file *filp,
4823 unsigned int cmd, unsigned long arg,
4824 struct binder_thread *thread)
4825{
4826 int ret = 0;
4827 struct binder_proc *proc = filp->private_data;
4828 unsigned int size = _IOC_SIZE(cmd);
4829 void __user *ubuf = (void __user *)arg;
4830 struct binder_write_read bwr;
4831
4832 if (size != sizeof(struct binder_write_read)) {
4833 ret = -EINVAL;
4834 goto out;
4835 }
4836 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4837 ret = -EFAULT;
4838 goto out;
4839 }
4840 binder_debug(BINDER_DEBUG_READ_WRITE,
4841 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4842 proc->pid, thread->pid,
4843 (u64)bwr.write_size, (u64)bwr.write_buffer,
4844 (u64)bwr.read_size, (u64)bwr.read_buffer);
4845
4846 if (bwr.write_size > 0) {
4847 ret = binder_thread_write(proc, thread,
4848 bwr.write_buffer,
4849 bwr.write_size,
4850 &bwr.write_consumed);
4851 trace_binder_write_done(ret);
4852 if (ret < 0) {
4853 bwr.read_consumed = 0;
4854 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4855 ret = -EFAULT;
4856 goto out;
4857 }
4858 }
4859 if (bwr.read_size > 0) {
4860 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4861 bwr.read_size,
4862 &bwr.read_consumed,
4863 filp->f_flags & O_NONBLOCK);
4864 trace_binder_read_done(ret);
4865 binder_inner_proc_lock(proc);
4866 if (!binder_worklist_empty_ilocked(&proc->todo))
4867 binder_wakeup_proc_ilocked(proc);
4868 binder_inner_proc_unlock(proc);
4869 if (ret < 0) {
4870 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4871 ret = -EFAULT;
4872 goto out;
4873 }
4874 }
4875 binder_debug(BINDER_DEBUG_READ_WRITE,
4876 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4877 proc->pid, thread->pid,
4878 (u64)bwr.write_consumed, (u64)bwr.write_size,
4879 (u64)bwr.read_consumed, (u64)bwr.read_size);
4880 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4881 ret = -EFAULT;
4882 goto out;
4883 }
4884out:
4885 return ret;
4886}
4887
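/**
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 * @filp:	binder file
 * @fbo:	object describing the node, or NULL for the legacy
 *		BINDER_SET_CONTEXT_MGR variant without a security context
 *
 * The context manager is the node behind handle 0. Only one may exist
 * per context (-EBUSY otherwise); the caller must pass the
 * security_binder_set_context_mgr() hook and, once a manager uid has
 * been recorded, must match it.
 */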
4888static int binder_ioctl_set_ctx_mgr(struct file *filp,
4889 struct flat_binder_object *fbo)
4890{
4891 int ret = 0;
4892 struct binder_proc *proc = filp->private_data;
4893 struct binder_context *context = proc->context;
4894 struct binder_node *new_node;
4895 kuid_t curr_euid = current_euid();
4896
4897 mutex_lock(&context->context_mgr_node_lock);
4898 if (context->binder_context_mgr_node) {
4899 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4900 ret = -EBUSY;
4901 goto out;
4902 }
4903 ret = security_binder_set_context_mgr(proc->tsk);
4904 if (ret < 0)
4905 goto out;
4906 if (uid_valid(context->binder_context_mgr_uid)) {
4907 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4908 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4909 from_kuid(&init_user_ns, curr_euid),
4910 from_kuid(&init_user_ns,
4911 context->binder_context_mgr_uid));
4912 ret = -EPERM;
4913 goto out;
4914 }
4915 } else {
4916 context->binder_context_mgr_uid = curr_euid;
4917 }
4918 new_node = binder_new_node(proc, fbo);
4919 if (!new_node) {
4920 ret = -ENOMEM;
4921 goto out;
4922 }
4923 binder_node_lock(new_node);
4924 new_node->local_weak_refs++;
4925 new_node->local_strong_refs++;
4926 new_node->has_strong_ref = 1;
4927 new_node->has_weak_ref = 1;
4928 context->binder_context_mgr_node = new_node;
4929 binder_node_unlock(new_node);
4930 binder_put_node(new_node);
4931out:
4932 mutex_unlock(&context->context_mgr_node_lock);
4933 return ret;
4934}
4935
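/*
 * BINDER_GET_NODE_INFO_FOR_REF is restricted to the context manager,
 * letting a service manager query the strong/weak reference counts
 * behind one of its handles. All fields other than the handle must be
 * zero on input.
 */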
4936static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4937 struct binder_node_info_for_ref *info)
4938{
4939 struct binder_node *node;
4940 struct binder_context *context = proc->context;
4941 __u32 handle = info->handle;
4942
4943 if (info->strong_count || info->weak_count || info->reserved1 ||
4944 info->reserved2 || info->reserved3) {
4945 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4946 proc->pid);
4947 return -EINVAL;
4948 }
4949
4950	/* This ioctl may only be used by the context manager */
4951 mutex_lock(&context->context_mgr_node_lock);
4952 if (!context->binder_context_mgr_node ||
4953 context->binder_context_mgr_node->proc != proc) {
4954 mutex_unlock(&context->context_mgr_node_lock);
4955 return -EPERM;
4956 }
4957 mutex_unlock(&context->context_mgr_node_lock);
4958
4959 node = binder_get_node_from_ref(proc, handle, true, NULL);
4960 if (!node)
4961 return -EINVAL;
4962
4963 info->strong_count = node->local_strong_refs +
4964 node->internal_strong_refs;
4965 info->weak_count = node->local_weak_refs;
4966
4967 binder_put_node(node);
4968
4969 return 0;
4970}
4971
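/*
 * BINDER_GET_NODE_DEBUG_INFO returns the first node whose ptr is
 * strictly greater than info->ptr, so userspace can enumerate a
 * process's nodes by feeding each returned ptr back in; an all-zero
 * result marks the end of the list.
 */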
4972static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4973 struct binder_node_debug_info *info)
4974{
4975 struct rb_node *n;
4976 binder_uintptr_t ptr = info->ptr;
4977
4978 memset(info, 0, sizeof(*info));
4979
4980 binder_inner_proc_lock(proc);
4981 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4982 struct binder_node *node = rb_entry(n, struct binder_node,
4983 rb_node);
4984 if (node->ptr > ptr) {
4985 info->ptr = node->ptr;
4986 info->cookie = node->cookie;
4987 info->has_strong_ref = node->has_strong_ref;
4988 info->has_weak_ref = node->has_weak_ref;
4989 break;
4990 }
4991 }
4992 binder_inner_proc_unlock(proc);
4993
4994 return 0;
4995}
4996
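/*
 * Top-level ioctl dispatcher. Every command first binds the caller to
 * a struct binder_thread; looper_need_return is cleared on the way out
 * so a thread kicked by binder_flush() resumes normal operation.
 */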
4997static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4998{
4999 int ret;
5000 struct binder_proc *proc = filp->private_data;
5001 struct binder_thread *thread;
5002 unsigned int size = _IOC_SIZE(cmd);
5003 void __user *ubuf = (void __user *)arg;
5004
5005	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5006			proc->pid, current->pid, cmd, arg);*/
5007
5008 binder_selftest_alloc(&proc->alloc);
5009
5010 trace_binder_ioctl(cmd, arg);
5011
5012 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5013 if (ret)
5014 goto err_unlocked;
5015
5016 thread = binder_get_thread(proc);
5017 if (thread == NULL) {
5018 ret = -ENOMEM;
5019 goto err;
5020 }
5021
5022 switch (cmd) {
5023 case BINDER_WRITE_READ:
5024 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5025 if (ret)
5026 goto err;
5027 break;
5028 case BINDER_SET_MAX_THREADS: {
5029 int max_threads;
5030
5031 if (copy_from_user(&max_threads, ubuf,
5032 sizeof(max_threads))) {
5033 ret = -EINVAL;
5034 goto err;
5035 }
5036 binder_inner_proc_lock(proc);
5037 proc->max_threads = max_threads;
5038 binder_inner_proc_unlock(proc);
5039 break;
5040 }
5041 case BINDER_SET_CONTEXT_MGR_EXT: {
5042 struct flat_binder_object fbo;
5043
5044 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5045 ret = -EINVAL;
5046 goto err;
5047 }
5048 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5049 if (ret)
5050 goto err;
5051 break;
5052 }
5053 case BINDER_SET_CONTEXT_MGR:
5054 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5055 if (ret)
5056 goto err;
5057 break;
5058 case BINDER_THREAD_EXIT:
5059 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5060 proc->pid, thread->pid);
5061 binder_thread_release(proc, thread);
5062 thread = NULL;
5063 break;
5064 case BINDER_VERSION: {
5065 struct binder_version __user *ver = ubuf;
5066
5067 if (size != sizeof(struct binder_version)) {
5068 ret = -EINVAL;
5069 goto err;
5070 }
5071 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5072 &ver->protocol_version)) {
5073 ret = -EINVAL;
5074 goto err;
5075 }
5076 break;
5077 }
5078 case BINDER_GET_NODE_INFO_FOR_REF: {
5079 struct binder_node_info_for_ref info;
5080
5081 if (copy_from_user(&info, ubuf, sizeof(info))) {
5082 ret = -EFAULT;
5083 goto err;
5084 }
5085
5086 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5087 if (ret < 0)
5088 goto err;
5089
5090 if (copy_to_user(ubuf, &info, sizeof(info))) {
5091 ret = -EFAULT;
5092 goto err;
5093 }
5094
5095 break;
5096 }
5097 case BINDER_GET_NODE_DEBUG_INFO: {
5098 struct binder_node_debug_info info;
5099
5100 if (copy_from_user(&info, ubuf, sizeof(info))) {
5101 ret = -EFAULT;
5102 goto err;
5103 }
5104
5105 ret = binder_ioctl_get_node_debug_info(proc, &info);
5106 if (ret < 0)
5107 goto err;
5108
5109 if (copy_to_user(ubuf, &info, sizeof(info))) {
5110 ret = -EFAULT;
5111 goto err;
5112 }
5113 break;
5114 }
5115 default:
5116 ret = -EINVAL;
5117 goto err;
5118 }
5119 ret = 0;
5120err:
5121 if (thread)
5122 thread->looper_need_return = false;
5123 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5124 if (ret && ret != -ERESTARTSYS)
5125 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5126err_unlocked:
5127 trace_binder_ioctl_done(ret);
5128 return ret;
5129}
5130
5131static void binder_vma_open(struct vm_area_struct *vma)
5132{
5133 struct binder_proc *proc = vma->vm_private_data;
5134
5135 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5136 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5137 proc->pid, vma->vm_start, vma->vm_end,
5138 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5139 (unsigned long)pgprot_val(vma->vm_page_prot));
5140}
5141
5142static void binder_vma_close(struct vm_area_struct *vma)
5143{
5144 struct binder_proc *proc = vma->vm_private_data;
5145
5146 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5147 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5148 proc->pid, vma->vm_start, vma->vm_end,
5149 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5150 (unsigned long)pgprot_val(vma->vm_page_prot));
5151 binder_alloc_vma_close(&proc->alloc);
5152}
5153
5154static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5155{
5156 return VM_FAULT_SIGBUS;
5157}
5158
5159static const struct vm_operations_struct binder_vm_ops = {
5160 .open = binder_vma_open,
5161 .close = binder_vma_close,
5162 .fault = binder_vm_fault,
5163};
5164
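/*
 * The buffer area is mapped read-only: writable mappings are rejected
 * via FORBIDDEN_MMAP_FLAGS and VM_MAYWRITE is cleared so the mapping
 * cannot later be mprotect()ed writable. Only the process that opened
 * the device may map it, and binder_alloc_mmap_handler() enforces a
 * single mapping per process.
 */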
5165static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5166{
5167 int ret;
5168 struct binder_proc *proc = filp->private_data;
5169 const char *failure_string;
5170
5171 if (proc->tsk != current->group_leader)
5172 return -EINVAL;
5173
5174 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5175 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5176 __func__, proc->pid, vma->vm_start, vma->vm_end,
5177 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5178 (unsigned long)pgprot_val(vma->vm_page_prot));
5179
5180 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5181 ret = -EPERM;
5182 failure_string = "bad vm_flags";
5183 goto err_bad_arg;
5184 }
5185 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5186 vma->vm_flags &= ~VM_MAYWRITE;
5187
5188 vma->vm_ops = &binder_vm_ops;
5189 vma->vm_private_data = proc;
5190
5191 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5192 if (ret)
5193 return ret;
5194 return 0;
5195
5196err_bad_arg:
5197 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5198 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5199 return ret;
5200}
5201
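/*
 * open() of a binder device: allocate the per-process binder_proc,
 * bind it to the device's context (taking a device reference), and
 * publish debugfs/binderfs state entries on the first open by a given
 * PID.
 */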
5202static int binder_open(struct inode *nodp, struct file *filp)
5203{
5204 struct binder_proc *proc, *itr;
5205 struct binder_device *binder_dev;
5206 struct binderfs_info *info;
5207 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5208 bool existing_pid = false;
5209
5210 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5211 current->group_leader->pid, current->pid);
5212
5213 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5214 if (proc == NULL)
5215 return -ENOMEM;
5216 spin_lock_init(&proc->inner_lock);
5217 spin_lock_init(&proc->outer_lock);
5218 get_task_struct(current->group_leader);
5219 proc->tsk = current->group_leader;
5220 INIT_LIST_HEAD(&proc->todo);
5221 proc->default_priority = task_nice(current);
5222
5223 if (is_binderfs_device(nodp)) {
5224 binder_dev = nodp->i_private;
5225 info = nodp->i_sb->s_fs_info;
5226 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5227 } else {
5228 binder_dev = container_of(filp->private_data,
5229 struct binder_device, miscdev);
5230 }
5231 refcount_inc(&binder_dev->ref);
5232 proc->context = &binder_dev->context;
5233 binder_alloc_init(&proc->alloc);
5234
5235 binder_stats_created(BINDER_STAT_PROC);
5236 proc->pid = current->group_leader->pid;
5237 INIT_LIST_HEAD(&proc->delivered_death);
5238 INIT_LIST_HEAD(&proc->waiting_threads);
5239 filp->private_data = proc;
5240
5241 mutex_lock(&binder_procs_lock);
5242 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5243 if (itr->pid == proc->pid) {
5244 existing_pid = true;
5245 break;
5246 }
5247 }
5248 hlist_add_head(&proc->proc_node, &binder_procs);
5249 mutex_unlock(&binder_procs_lock);
5250
5251 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5252 char strbuf[11];
5253
5254 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5255		/*
5256		 * proc debug entries are shared between contexts, so this
5257		 * will fail if the process tries to open the driver again
5258		 * with a different context; the printing code shows every
5259		 * context of a given PID anyway, so nothing is lost.
5260		 */
5261 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5262 binder_debugfs_dir_entry_proc,
5263 (void *)(unsigned long)proc->pid,
5264 &proc_fops);
5265 }
5266
5267 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5268 char strbuf[11];
5269 struct dentry *binderfs_entry;
5270
5271 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5272		/*
5273		 * Similar to debugfs, the process-specific log file is
5274		 * shared between contexts, so creation fails if another
5275		 * context of the same process already created it. That is
5276		 * fine: the one file covers all contexts of the PID.
5277		 */
5278 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5279 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5280 if (!IS_ERR(binderfs_entry)) {
5281 proc->binderfs_entry = binderfs_entry;
5282 } else {
5283 int error;
5284
5285 error = PTR_ERR(binderfs_entry);
5286 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5287 strbuf, error);
5288 }
5289 }
5290
5291 return 0;
5292}
5293
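/*
 * flush() only defers the real work: binder_deferred_flush() later
 * forces every thread out of its read loop via looper_need_return.
 */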
5294static int binder_flush(struct file *filp, fl_owner_t id)
5295{
5296 struct binder_proc *proc = filp->private_data;
5297
5298 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5299
5300 return 0;
5301}
5302
5303static void binder_deferred_flush(struct binder_proc *proc)
5304{
5305 struct rb_node *n;
5306 int wake_count = 0;
5307
5308 binder_inner_proc_lock(proc);
5309 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5310 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5311
5312 thread->looper_need_return = true;
5313 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5314 wake_up_interruptible(&thread->wait);
5315 wake_count++;
5316 }
5317 }
5318 binder_inner_proc_unlock(proc);
5319
5320 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5321 "binder_flush: %d woke %d threads\n", proc->pid,
5322 wake_count);
5323}
5324
5325static int binder_release(struct inode *nodp, struct file *filp)
5326{
5327 struct binder_proc *proc = filp->private_data;
5328
5329 debugfs_remove(proc->debugfs_entry);
5330
5331 if (proc->binderfs_entry) {
5332 binderfs_remove_file(proc->binderfs_entry);
5333 proc->binderfs_entry = NULL;
5334 }
5335
5336 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5337
5338 return 0;
5339}
5340
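/**
 * binder_node_release() - detach a node from its dying process
 * @node:	node being released
 * @refs:	running count of incoming refs, updated and returned
 *
 * Frees the node outright if nothing references it anymore; otherwise
 * moves it to binder_dead_nodes and queues BR_DEAD_BINDER work for
 * every ref that requested a death notification.
 */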
5341static int binder_node_release(struct binder_node *node, int refs)
5342{
5343 struct binder_ref *ref;
5344 int death = 0;
5345 struct binder_proc *proc = node->proc;
5346
5347 binder_release_work(proc, &node->async_todo);
5348
5349 binder_node_lock(node);
5350 binder_inner_proc_lock(proc);
5351 binder_dequeue_work_ilocked(&node->work);
5352	/*
5353	 * The caller must have taken a temporary ref on the node.
5354	 */
5355 BUG_ON(!node->tmp_refs);
5356 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5357 binder_inner_proc_unlock(proc);
5358 binder_node_unlock(node);
5359 binder_free_node(node);
5360
5361 return refs;
5362 }
5363
5364 node->proc = NULL;
5365 node->local_strong_refs = 0;
5366 node->local_weak_refs = 0;
5367 binder_inner_proc_unlock(proc);
5368
5369 spin_lock(&binder_dead_nodes_lock);
5370 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5371 spin_unlock(&binder_dead_nodes_lock);
5372
5373 hlist_for_each_entry(ref, &node->refs, node_entry) {
5374 refs++;
5375		/*
5376		 * Need the node lock to synchronize
5377		 * with new notification requests and the
5378		 * inner lock to synchronize with queued
5379		 * death notifications.
5380		 */
5381 binder_inner_proc_lock(ref->proc);
5382 if (!ref->death) {
5383 binder_inner_proc_unlock(ref->proc);
5384 continue;
5385 }
5386
5387 death++;
5388
5389 BUG_ON(!list_empty(&ref->death->work.entry));
5390 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5391 binder_enqueue_work_ilocked(&ref->death->work,
5392 &ref->proc->todo);
5393 binder_wakeup_proc_ilocked(ref->proc);
5394 binder_inner_proc_unlock(ref->proc);
5395 }
5396
5397 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5398 "node %d now dead, refs %d, death %d\n",
5399 node->debug_id, refs, death);
5400 binder_node_unlock(node);
5401 binder_put_node(node);
5402
5403 return refs;
5404}
5405
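/*
 * Final teardown, run from the deferred workqueue after release():
 * detach the context manager if it lived here, release every thread,
 * node and ref, then drop the proc's temporary reference last so the
 * struct stays valid throughout.
 */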
5406static void binder_deferred_release(struct binder_proc *proc)
5407{
5408 struct binder_context *context = proc->context;
5409 struct binder_device *device;
5410 struct rb_node *n;
5411 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5412
5413 mutex_lock(&binder_procs_lock);
5414 hlist_del(&proc->proc_node);
5415 mutex_unlock(&binder_procs_lock);
5416
5417 mutex_lock(&context->context_mgr_node_lock);
5418 if (context->binder_context_mgr_node &&
5419 context->binder_context_mgr_node->proc == proc) {
5420 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5421 "%s: %d context_mgr_node gone\n",
5422 __func__, proc->pid);
5423 context->binder_context_mgr_node = NULL;
5424 }
5425 mutex_unlock(&context->context_mgr_node_lock);
5426 device = container_of(proc->context, struct binder_device, context);
5427 if (refcount_dec_and_test(&device->ref)) {
5428 kfree(context->name);
5429 kfree(device);
5430 }
5431 proc->context = NULL;
5432 binder_inner_proc_lock(proc);
5433	/*
5434	 * Make sure proc stays alive after we
5435	 * remove all the threads
5436	 */
5437 proc->tmp_ref++;
5438
5439 proc->is_dead = true;
5440 threads = 0;
5441 active_transactions = 0;
5442 while ((n = rb_first(&proc->threads))) {
5443 struct binder_thread *thread;
5444
5445 thread = rb_entry(n, struct binder_thread, rb_node);
5446 binder_inner_proc_unlock(proc);
5447 threads++;
5448 active_transactions += binder_thread_release(proc, thread);
5449 binder_inner_proc_lock(proc);
5450 }
5451
5452 nodes = 0;
5453 incoming_refs = 0;
5454 while ((n = rb_first(&proc->nodes))) {
5455 struct binder_node *node;
5456
5457 node = rb_entry(n, struct binder_node, rb_node);
5458 nodes++;
5459		/*
5460		 * take a temporary ref on the node before
5461		 * calling binder_node_release() which will either
5462		 * kfree() the node or call binder_put_node()
5463		 */
5464 binder_inc_node_tmpref_ilocked(node);
5465 rb_erase(&node->rb_node, &proc->nodes);
5466 binder_inner_proc_unlock(proc);
5467 incoming_refs = binder_node_release(node, incoming_refs);
5468 binder_inner_proc_lock(proc);
5469 }
5470 binder_inner_proc_unlock(proc);
5471
5472 outgoing_refs = 0;
5473 binder_proc_lock(proc);
5474 while ((n = rb_first(&proc->refs_by_desc))) {
5475 struct binder_ref *ref;
5476
5477 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5478 outgoing_refs++;
5479 binder_cleanup_ref_olocked(ref);
5480 binder_proc_unlock(proc);
5481 binder_free_ref(ref);
5482 binder_proc_lock(proc);
5483 }
5484 binder_proc_unlock(proc);
5485
5486 binder_release_work(proc, &proc->todo);
5487 binder_release_work(proc, &proc->delivered_death);
5488
5489 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5490 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5491 __func__, proc->pid, threads, nodes, incoming_refs,
5492 outgoing_refs, active_transactions);
5493
5494 binder_proc_dec_tmpref(proc);
5495}
5496
5497static void binder_deferred_func(struct work_struct *work)
5498{
5499 struct binder_proc *proc;
5500
5501 int defer;
5502
5503 do {
5504 mutex_lock(&binder_deferred_lock);
5505 if (!hlist_empty(&binder_deferred_list)) {
5506 proc = hlist_entry(binder_deferred_list.first,
5507 struct binder_proc, deferred_work_node);
5508 hlist_del_init(&proc->deferred_work_node);
5509 defer = proc->deferred_work;
5510 proc->deferred_work = 0;
5511 } else {
5512 proc = NULL;
5513 defer = 0;
5514 }
5515 mutex_unlock(&binder_deferred_lock);
5516
5517 if (defer & BINDER_DEFERRED_FLUSH)
5518 binder_deferred_flush(proc);
5519
5520 if (defer & BINDER_DEFERRED_RELEASE)
5521 binder_deferred_release(proc);
5522 } while (proc);
5523}
5524static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5525
5526static void
5527binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5528{
5529 mutex_lock(&binder_deferred_lock);
5530 proc->deferred_work |= defer;
5531 if (hlist_unhashed(&proc->deferred_work_node)) {
5532 hlist_add_head(&proc->deferred_work_node,
5533 &binder_deferred_list);
5534 schedule_work(&binder_deferred_work);
5535 }
5536 mutex_unlock(&binder_deferred_lock);
5537}
5538
5539static void print_binder_transaction_ilocked(struct seq_file *m,
5540 struct binder_proc *proc,
5541 const char *prefix,
5542 struct binder_transaction *t)
5543{
5544 struct binder_proc *to_proc;
5545 struct binder_buffer *buffer = t->buffer;
5546
5547 spin_lock(&t->lock);
5548 to_proc = t->to_proc;
5549 seq_printf(m,
5550 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5551 prefix, t->debug_id, t,
5552 t->from ? t->from->proc->pid : 0,
5553 t->from ? t->from->pid : 0,
5554 to_proc ? to_proc->pid : 0,
5555 t->to_thread ? t->to_thread->pid : 0,
5556 t->code, t->flags, t->priority, t->need_reply);
5557 spin_unlock(&t->lock);
5558
5559 if (proc != to_proc) {
5560		/*
5561		 * Can only safely deref buffer if we are holding the
5562		 * correct proc inner lock for this node
5563		 */
5564 seq_puts(m, "\n");
5565 return;
5566 }
5567
5568 if (buffer == NULL) {
5569 seq_puts(m, " buffer free\n");
5570 return;
5571 }
5572 if (buffer->target_node)
5573 seq_printf(m, " node %d", buffer->target_node->debug_id);
5574 seq_printf(m, " size %zd:%zd data %pK\n",
5575 buffer->data_size, buffer->offsets_size,
5576 buffer->user_data);
5577}
5578
5579static void print_binder_work_ilocked(struct seq_file *m,
5580 struct binder_proc *proc,
5581 const char *prefix,
5582 const char *transaction_prefix,
5583 struct binder_work *w)
5584{
5585 struct binder_node *node;
5586 struct binder_transaction *t;
5587
5588 switch (w->type) {
5589 case BINDER_WORK_TRANSACTION:
5590 t = container_of(w, struct binder_transaction, work);
5591 print_binder_transaction_ilocked(
5592 m, proc, transaction_prefix, t);
5593 break;
5594 case BINDER_WORK_RETURN_ERROR: {
5595 struct binder_error *e = container_of(
5596 w, struct binder_error, work);
5597
5598 seq_printf(m, "%stransaction error: %u\n",
5599 prefix, e->cmd);
5600 } break;
5601 case BINDER_WORK_TRANSACTION_COMPLETE:
5602 seq_printf(m, "%stransaction complete\n", prefix);
5603 break;
5604 case BINDER_WORK_NODE:
5605 node = container_of(w, struct binder_node, work);
5606 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5607 prefix, node->debug_id,
5608 (u64)node->ptr, (u64)node->cookie);
5609 break;
5610 case BINDER_WORK_DEAD_BINDER:
5611 seq_printf(m, "%shas dead binder\n", prefix);
5612 break;
5613 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5614 seq_printf(m, "%shas cleared dead binder\n", prefix);
5615 break;
5616 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5617 seq_printf(m, "%shas cleared death notification\n", prefix);
5618 break;
5619 default:
5620 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5621 break;
5622 }
5623}
5624
5625static void print_binder_thread_ilocked(struct seq_file *m,
5626 struct binder_thread *thread,
5627 int print_always)
5628{
5629 struct binder_transaction *t;
5630 struct binder_work *w;
5631 size_t start_pos = m->count;
5632 size_t header_pos;
5633
5634 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5635 thread->pid, thread->looper,
5636 thread->looper_need_return,
5637 atomic_read(&thread->tmp_ref));
5638 header_pos = m->count;
5639 t = thread->transaction_stack;
5640 while (t) {
5641 if (t->from == thread) {
5642 print_binder_transaction_ilocked(m, thread->proc,
5643 " outgoing transaction", t);
5644 t = t->from_parent;
5645 } else if (t->to_thread == thread) {
5646 print_binder_transaction_ilocked(m, thread->proc,
5647 " incoming transaction", t);
5648 t = t->to_parent;
5649 } else {
5650 print_binder_transaction_ilocked(m, thread->proc,
5651 " bad transaction", t);
5652 t = NULL;
5653 }
5654 }
5655 list_for_each_entry(w, &thread->todo, entry) {
5656 print_binder_work_ilocked(m, thread->proc, " ",
5657 " pending transaction", w);
5658 }
5659 if (!print_always && m->count == header_pos)
5660 m->count = start_pos;
5661}
5662
5663static void print_binder_node_nilocked(struct seq_file *m,
5664 struct binder_node *node)
5665{
5666 struct binder_ref *ref;
5667 struct binder_work *w;
5668 int count;
5669
5670 count = 0;
5671 hlist_for_each_entry(ref, &node->refs, node_entry)
5672 count++;
5673
5674 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5675 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5676 node->has_strong_ref, node->has_weak_ref,
5677 node->local_strong_refs, node->local_weak_refs,
5678 node->internal_strong_refs, count, node->tmp_refs);
5679 if (count) {
5680 seq_puts(m, " proc");
5681 hlist_for_each_entry(ref, &node->refs, node_entry)
5682 seq_printf(m, " %d", ref->proc->pid);
5683 }
5684 seq_puts(m, "\n");
5685 if (node->proc) {
5686 list_for_each_entry(w, &node->async_todo, entry)
5687 print_binder_work_ilocked(m, node->proc, " ",
5688 " pending async transaction", w);
5689 }
5690}
5691
5692static void print_binder_ref_olocked(struct seq_file *m,
5693 struct binder_ref *ref)
5694{
5695 binder_node_lock(ref->node);
5696 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5697 ref->data.debug_id, ref->data.desc,
5698 ref->node->proc ? "" : "dead ",
5699 ref->node->debug_id, ref->data.strong,
5700 ref->data.weak, ref->death);
5701 binder_node_unlock(ref->node);
5702}
5703
5704static void print_binder_proc(struct seq_file *m,
5705 struct binder_proc *proc, int print_all)
5706{
5707 struct binder_work *w;
5708 struct rb_node *n;
5709 size_t start_pos = m->count;
5710 size_t header_pos;
5711 struct binder_node *last_node = NULL;
5712
5713 seq_printf(m, "proc %d\n", proc->pid);
5714 seq_printf(m, "context %s\n", proc->context->name);
5715 header_pos = m->count;
5716
5717 binder_inner_proc_lock(proc);
5718 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5719 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5720 rb_node), print_all);
5721
5722 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5723 struct binder_node *node = rb_entry(n, struct binder_node,
5724 rb_node);
5725 if (!print_all && !node->has_async_transaction)
5726 continue;
5727
5728		/*
5729		 * take a temporary reference on the node so it
5730		 * survives and isn't removed from the tree
5731		 * while we print it.
5732		 */
5733 binder_inc_node_tmpref_ilocked(node);
5734
5735 binder_inner_proc_unlock(proc);
5736 if (last_node)
5737 binder_put_node(last_node);
5738 binder_node_inner_lock(node);
5739 print_binder_node_nilocked(m, node);
5740 binder_node_inner_unlock(node);
5741 last_node = node;
5742 binder_inner_proc_lock(proc);
5743 }
5744 binder_inner_proc_unlock(proc);
5745 if (last_node)
5746 binder_put_node(last_node);
5747
5748 if (print_all) {
5749 binder_proc_lock(proc);
5750 for (n = rb_first(&proc->refs_by_desc);
5751 n != NULL;
5752 n = rb_next(n))
5753 print_binder_ref_olocked(m, rb_entry(n,
5754 struct binder_ref,
5755 rb_node_desc));
5756 binder_proc_unlock(proc);
5757 }
5758 binder_alloc_print_allocated(m, &proc->alloc);
5759 binder_inner_proc_lock(proc);
5760 list_for_each_entry(w, &proc->todo, entry)
5761 print_binder_work_ilocked(m, proc, " ",
5762 " pending transaction", w);
5763 list_for_each_entry(w, &proc->delivered_death, entry) {
5764 seq_puts(m, " has delivered dead binder\n");
5765 break;
5766 }
5767 binder_inner_proc_unlock(proc);
5768 if (!print_all && m->count == header_pos)
5769 m->count = start_pos;
5770}
5771
5772static const char * const binder_return_strings[] = {
5773 "BR_ERROR",
5774 "BR_OK",
5775 "BR_TRANSACTION",
5776 "BR_REPLY",
5777 "BR_ACQUIRE_RESULT",
5778 "BR_DEAD_REPLY",
5779 "BR_TRANSACTION_COMPLETE",
5780 "BR_INCREFS",
5781 "BR_ACQUIRE",
5782 "BR_RELEASE",
5783 "BR_DECREFS",
5784 "BR_ATTEMPT_ACQUIRE",
5785 "BR_NOOP",
5786 "BR_SPAWN_LOOPER",
5787 "BR_FINISHED",
5788 "BR_DEAD_BINDER",
5789 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5790 "BR_FAILED_REPLY"
5791};
5792
5793static const char * const binder_command_strings[] = {
5794 "BC_TRANSACTION",
5795 "BC_REPLY",
5796 "BC_ACQUIRE_RESULT",
5797 "BC_FREE_BUFFER",
5798 "BC_INCREFS",
5799 "BC_ACQUIRE",
5800 "BC_RELEASE",
5801 "BC_DECREFS",
5802 "BC_INCREFS_DONE",
5803 "BC_ACQUIRE_DONE",
5804 "BC_ATTEMPT_ACQUIRE",
5805 "BC_REGISTER_LOOPER",
5806 "BC_ENTER_LOOPER",
5807 "BC_EXIT_LOOPER",
5808 "BC_REQUEST_DEATH_NOTIFICATION",
5809 "BC_CLEAR_DEATH_NOTIFICATION",
5810 "BC_DEAD_BINDER_DONE",
5811 "BC_TRANSACTION_SG",
5812 "BC_REPLY_SG",
5813};
5814
5815static const char * const binder_objstat_strings[] = {
5816 "proc",
5817 "thread",
5818 "node",
5819 "ref",
5820 "death",
5821 "transaction",
5822 "transaction_complete"
5823};
5824
5825static void print_binder_stats(struct seq_file *m, const char *prefix,
5826 struct binder_stats *stats)
5827{
5828 int i;
5829
5830 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5831 ARRAY_SIZE(binder_command_strings));
5832 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5833 int temp = atomic_read(&stats->bc[i]);
5834
5835 if (temp)
5836 seq_printf(m, "%s%s: %d\n", prefix,
5837 binder_command_strings[i], temp);
5838 }
5839
5840 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5841 ARRAY_SIZE(binder_return_strings));
5842 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5843 int temp = atomic_read(&stats->br[i]);
5844
5845 if (temp)
5846 seq_printf(m, "%s%s: %d\n", prefix,
5847 binder_return_strings[i], temp);
5848 }
5849
5850 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5851 ARRAY_SIZE(binder_objstat_strings));
5852 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5853 ARRAY_SIZE(stats->obj_deleted));
5854 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5855 int created = atomic_read(&stats->obj_created[i]);
5856 int deleted = atomic_read(&stats->obj_deleted[i]);
5857
5858 if (created || deleted)
5859 seq_printf(m, "%s%s: active %d total %d\n",
5860 prefix,
5861 binder_objstat_strings[i],
5862 created - deleted,
5863 created);
5864 }
5865}
5866
5867static void print_binder_proc_stats(struct seq_file *m,
5868 struct binder_proc *proc)
5869{
5870 struct binder_work *w;
5871 struct binder_thread *thread;
5872 struct rb_node *n;
5873 int count, strong, weak, ready_threads;
5874 size_t free_async_space =
5875 binder_alloc_get_free_async_space(&proc->alloc);
5876
5877 seq_printf(m, "proc %d\n", proc->pid);
5878 seq_printf(m, "context %s\n", proc->context->name);
5879 count = 0;
5880 ready_threads = 0;
5881 binder_inner_proc_lock(proc);
5882 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5883 count++;
5884
5885 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5886 ready_threads++;
5887
5888 seq_printf(m, " threads: %d\n", count);
5889 seq_printf(m, " requested threads: %d+%d/%d\n"
5890 " ready threads %d\n"
5891 " free async space %zd\n", proc->requested_threads,
5892 proc->requested_threads_started, proc->max_threads,
5893 ready_threads,
5894 free_async_space);
5895 count = 0;
5896 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5897 count++;
5898 binder_inner_proc_unlock(proc);
5899 seq_printf(m, " nodes: %d\n", count);
5900 count = 0;
5901 strong = 0;
5902 weak = 0;
5903 binder_proc_lock(proc);
5904 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5905 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5906 rb_node_desc);
5907 count++;
5908 strong += ref->data.strong;
5909 weak += ref->data.weak;
5910 }
5911 binder_proc_unlock(proc);
5912 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5913
5914 count = binder_alloc_get_allocated_count(&proc->alloc);
5915 seq_printf(m, " buffers: %d\n", count);
5916
5917 binder_alloc_print_pages(m, &proc->alloc);
5918
5919 count = 0;
5920 binder_inner_proc_lock(proc);
5921 list_for_each_entry(w, &proc->todo, entry) {
5922 if (w->type == BINDER_WORK_TRANSACTION)
5923 count++;
5924 }
5925 binder_inner_proc_unlock(proc);
5926 seq_printf(m, " pending transactions: %d\n", count);
5927
5928 print_binder_stats(m, " ", &proc->stats);
5929}
5930
5931
5932int binder_state_show(struct seq_file *m, void *unused)
5933{
5934 struct binder_proc *proc;
5935 struct binder_node *node;
5936 struct binder_node *last_node = NULL;
5937
5938 seq_puts(m, "binder state:\n");
5939
5940 spin_lock(&binder_dead_nodes_lock);
5941 if (!hlist_empty(&binder_dead_nodes))
5942 seq_puts(m, "dead nodes:\n");
5943 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5944		/*
5945		 * take a temporary reference on the node so it
5946		 * survives and isn't removed from the list
5947		 * while we print it.
5948		 */
5949 node->tmp_refs++;
5950 spin_unlock(&binder_dead_nodes_lock);
5951 if (last_node)
5952 binder_put_node(last_node);
5953 binder_node_lock(node);
5954 print_binder_node_nilocked(m, node);
5955 binder_node_unlock(node);
5956 last_node = node;
5957 spin_lock(&binder_dead_nodes_lock);
5958 }
5959 spin_unlock(&binder_dead_nodes_lock);
5960 if (last_node)
5961 binder_put_node(last_node);
5962
5963 mutex_lock(&binder_procs_lock);
5964 hlist_for_each_entry(proc, &binder_procs, proc_node)
5965 print_binder_proc(m, proc, 1);
5966 mutex_unlock(&binder_procs_lock);
5967
5968 return 0;
5969}
5970
5971int binder_stats_show(struct seq_file *m, void *unused)
5972{
5973 struct binder_proc *proc;
5974
5975 seq_puts(m, "binder stats:\n");
5976
5977 print_binder_stats(m, "", &binder_stats);
5978
5979 mutex_lock(&binder_procs_lock);
5980 hlist_for_each_entry(proc, &binder_procs, proc_node)
5981 print_binder_proc_stats(m, proc);
5982 mutex_unlock(&binder_procs_lock);
5983
5984 return 0;
5985}
5986
5987int binder_transactions_show(struct seq_file *m, void *unused)
5988{
5989 struct binder_proc *proc;
5990
5991 seq_puts(m, "binder transactions:\n");
5992 mutex_lock(&binder_procs_lock);
5993 hlist_for_each_entry(proc, &binder_procs, proc_node)
5994 print_binder_proc(m, proc, 0);
5995 mutex_unlock(&binder_procs_lock);
5996
5997 return 0;
5998}
5999
6000static int proc_show(struct seq_file *m, void *unused)
6001{
6002 struct binder_proc *itr;
6003 int pid = (unsigned long)m->private;
6004
6005 mutex_lock(&binder_procs_lock);
6006 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6007 if (itr->pid == pid) {
6008 seq_puts(m, "binder proc state:\n");
6009 print_binder_proc(m, itr, 1);
6010 }
6011 }
6012 mutex_unlock(&binder_procs_lock);
6013
6014 return 0;
6015}
6016
6017static void print_binder_transaction_log_entry(struct seq_file *m,
6018 struct binder_transaction_log_entry *e)
6019{
6020 int debug_id = READ_ONCE(e->debug_id_done);
6021	/*
6022	 * read barrier to guarantee debug_id_done read before
6023	 * we print the log values
6024	 */
6025 smp_rmb();
6026 seq_printf(m,
6027 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6028 e->debug_id, (e->call_type == 2) ? "reply" :
6029 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6030 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6031 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6032 e->return_error, e->return_error_param,
6033 e->return_error_line);
6034	/*
6035	 * read-barrier to guarantee read of debug_id_done after
6036	 * done printing the fields of the entry
6037	 */
6038 smp_rmb();
6039 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6040 "\n" : " (incomplete)\n");
6041}
6042
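/*
 * The transaction log is a fixed-size ring: log->cur counts entries
 * ever written (it starts at ~0U, so the first entry lands in slot 0).
 * Until the ring wraps, iteration starts at slot 0; once it has
 * wrapped (or log->full is set), it starts at the oldest slot,
 * (log->cur + 1) % ARRAY_SIZE(log->entry).
 */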
6043int binder_transaction_log_show(struct seq_file *m, void *unused)
6044{
6045 struct binder_transaction_log *log = m->private;
6046 unsigned int log_cur = atomic_read(&log->cur);
6047 unsigned int count;
6048 unsigned int cur;
6049 int i;
6050
6051 count = log_cur + 1;
6052 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6053 0 : count % ARRAY_SIZE(log->entry);
6054 if (count > ARRAY_SIZE(log->entry) || log->full)
6055 count = ARRAY_SIZE(log->entry);
6056 for (i = 0; i < count; i++) {
6057 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6058
6059 print_binder_transaction_log_entry(m, &log->entry[index]);
6060 }
6061 return 0;
6062}
6063
6064const struct file_operations binder_fops = {
6065 .owner = THIS_MODULE,
6066 .poll = binder_poll,
6067 .unlocked_ioctl = binder_ioctl,
6068 .compat_ioctl = compat_ptr_ioctl,
6069 .mmap = binder_mmap,
6070 .open = binder_open,
6071 .flush = binder_flush,
6072 .release = binder_release,
6073};
6074
6075static int __init init_binder_device(const char *name)
6076{
6077 int ret;
6078 struct binder_device *binder_device;
6079
6080 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6081 if (!binder_device)
6082 return -ENOMEM;
6083
6084 binder_device->miscdev.fops = &binder_fops;
6085 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6086 binder_device->miscdev.name = name;
6087
6088 refcount_set(&binder_device->ref, 1);
6089 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6090 binder_device->context.name = name;
6091 mutex_init(&binder_device->context.context_mgr_node_lock);
6092
6093 ret = misc_register(&binder_device->miscdev);
6094 if (ret < 0) {
6095 kfree(binder_device);
6096 return ret;
6097 }
6098
6099 hlist_add_head(&binder_device->hlist, &binder_devices);
6100
6101 return ret;
6102}
6103
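/*
 * Module init: set up the debugfs hierarchy, create the binder devices
 * listed in the "devices" parameter (only when CONFIG_ANDROID_BINDERFS
 * is disabled; with binderfs the devices come from the filesystem),
 * then register binderfs itself.
 */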
6104static int __init binder_init(void)
6105{
6106 int ret;
6107 char *device_name, *device_tmp;
6108 struct binder_device *device;
6109 struct hlist_node *tmp;
6110 char *device_names = NULL;
6111
6112 ret = binder_alloc_shrinker_init();
6113 if (ret)
6114 return ret;
6115
6116 atomic_set(&binder_transaction_log.cur, ~0U);
6117 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6118
6119 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6120 if (binder_debugfs_dir_entry_root)
6121 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6122 binder_debugfs_dir_entry_root);
6123
6124 if (binder_debugfs_dir_entry_root) {
6125 debugfs_create_file("state",
6126 0444,
6127 binder_debugfs_dir_entry_root,
6128 NULL,
6129 &binder_state_fops);
6130 debugfs_create_file("stats",
6131 0444,
6132 binder_debugfs_dir_entry_root,
6133 NULL,
6134 &binder_stats_fops);
6135 debugfs_create_file("transactions",
6136 0444,
6137 binder_debugfs_dir_entry_root,
6138 NULL,
6139 &binder_transactions_fops);
6140 debugfs_create_file("transaction_log",
6141 0444,
6142 binder_debugfs_dir_entry_root,
6143 &binder_transaction_log,
6144 &binder_transaction_log_fops);
6145 debugfs_create_file("failed_transaction_log",
6146 0444,
6147 binder_debugfs_dir_entry_root,
6148 &binder_transaction_log_failed,
6149 &binder_transaction_log_fops);
6150 }
6151
6152 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6153 strcmp(binder_devices_param, "") != 0) {
6154		/*
6155		 * Copy the module_parameter string, because we don't want to
6156		 * tokenize it in-place.
6157		 */
6158 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6159 if (!device_names) {
6160 ret = -ENOMEM;
6161 goto err_alloc_device_names_failed;
6162 }
6163
6164 device_tmp = device_names;
6165 while ((device_name = strsep(&device_tmp, ","))) {
6166 ret = init_binder_device(device_name);
6167 if (ret)
6168 goto err_init_binder_device_failed;
6169 }
6170 }
6171
6172 ret = init_binderfs();
6173 if (ret)
6174 goto err_init_binder_device_failed;
6175
6176 return ret;
6177
6178err_init_binder_device_failed:
6179 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6180 misc_deregister(&device->miscdev);
6181 hlist_del(&device->hlist);
6182 kfree(device);
6183 }
6184
6185 kfree(device_names);
6186
6187err_alloc_device_names_failed:
6188 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6189
6190 return ret;
6191}
6192
6193device_initcall(binder_init);
6194
6195#define CREATE_TRACE_POINTS
6196#include "binder_trace.h"
6197
6198MODULE_LICENSE("GPL v2");
6199