// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset()
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
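
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */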
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
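
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 * @local_weak_refs:      weak user refs from local process
 * @local_strong_refs:    strong user refs from local process
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 * @pending_strong_ref:   userspace has acked notification of strong ref
 * @has_weak_ref:         userspace notified of weak ref
 * @pending_weak_ref:     userspace has acked notification of weak ref
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */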
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
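
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */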
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
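
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB).
 * This structure is unsafe to access without holding @proc->outer_lock.
 */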
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};
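
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 * @tsk:                  task_struct for group_leader of process
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started; can only be 0 or 1
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */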
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
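
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */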
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;
	bool looper_need_return;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
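
/**
 * struct binder_txn_fd_fixup - transaction fd fixup
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since fd's are added
 * while the transaction is in progress, they cannot be installed
 * until the transaction is complete. This is the list of fd's to be
 * installed.
 */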
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
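
/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */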
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
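
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */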
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/* Release outer lock acquired via binder_proc_lock() */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/* Acquire proc->inner_lock. Used to protect todo lists. */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/* Release inner lock acquired via binder_inner_proc_lock() */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/* Acquire node->lock. Used to protect binder_node fields. */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/* Release lock acquired via binder_node_lock() */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
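
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */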
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/* Release locks acquired via binder_node_inner_lock() */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}
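
/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */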
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/*
 * binder_enqueue_work_ilocked() - Add an item to the work list
 *
 * Adds the work to the specified list. Requires the caller
 * to hold the proc->inner_lock.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
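
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */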
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
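
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */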
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/*
 * binder_enqueue_thread_work() - Add an item to the thread work list
 *
 * Same as binder_enqueue_thread_work_ilocked(), but acquires
 * the proc->inner_lock itself.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/*
 * binder_dequeue_work() - Removes an item from the work list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
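
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */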
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
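
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */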
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
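
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */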
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
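
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */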
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
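
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */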
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
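
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */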
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
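
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */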
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
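
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */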
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
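
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */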
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
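
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */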
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
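
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */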
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
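
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */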
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
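
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */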
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
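
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */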
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
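
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */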
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
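
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */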
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
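
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */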
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
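
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */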
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		} else {
			__release(&target_thread->proc->inner_lock);
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
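
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */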
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
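
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */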
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
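
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		considered valid.
 */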
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
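
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Return:		%true if a fixup in buffer @b at offset
 *			@fixup_offset is allowed.
 */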
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
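
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */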
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
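
/**
 * binder_do_fd_close() - close file descriptor via task work
 * @twork:	callback head for task work
 *
 * Runs from task_work when the current task returns to userspace;
 * releases the file captured by binder_deferred_fd_close().
 */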
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
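
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */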
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	__close_fd_get_file(fd, &twcb->file);
	if (twcb->file)
		task_work_add(current, &twcb->twork, true);
	else
		kfree(twcb);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
			WARN_ON(failed_at &&
				proc->tsk == current->group_leader);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the out-going buffer instead of the user pointer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err)
					binder_deferred_fd_close(fd);
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the out-going buffer instead of the user pointer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret < 0)
			return ret;
	}
	return 0;
}
2679
2680static int binder_fixup_parent(struct binder_transaction *t,
2681 struct binder_thread *thread,
2682 struct binder_buffer_object *bp,
2683 binder_size_t off_start_offset,
2684 binder_size_t num_valid,
2685 binder_size_t last_fixup_obj_off,
2686 binder_size_t last_fixup_min_off)
2687{
2688 struct binder_buffer_object *parent;
2689 struct binder_buffer *b = t->buffer;
2690 struct binder_proc *proc = thread->proc;
2691 struct binder_proc *target_proc = t->to_proc;
2692 struct binder_object object;
2693 binder_size_t buffer_offset;
2694 binder_size_t parent_offset;
2695
2696 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2697 return 0;
2698
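	/* Locate and validate the parent buffer object that bp points into. */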
	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
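	/* Write bp's target-side address into the parent at bp->parent_offset. */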
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
					&bp->buffer, sizeof(bp->buffer))) {
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the thread and that specific thread is woken up.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

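	/*
	 * Async work waits on the node while a previous async transaction
	 * for it is still in flight; otherwise prefer a waiting thread and
	 * fall back to the proc-wide todo list.
	 */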
	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
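		/* A reply must come back on the thread the transaction was sent to. */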
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
					  proc->pid, thread->pid, in_reply_to->debug_id,
					  in_reply_to->to_proc ?
					  in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
						  proc->pid, thread->pid, tmp->debug_id,
						  tmp->to_proc ? tmp->to_proc->pid : 0,
						  tmp->to_thread ?
						  tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
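			/*
			 * Walk the call stack: if some transaction in it
			 * came from a thread in the target process, queue
			 * the new work on that thread, since it is blocked
			 * waiting on this call chain anyway.
			 */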
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
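	/*
	 * Walk the offsets array and translate every embedded object
	 * (nodes, handles, fds, fd arrays, scatter-gather buffers) into
	 * the target process's view.
	 */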
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) *
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer,
						sg_buf_offset,
						(const void __user *)
							(uintptr_t)bp->buffer,
						bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) *
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
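	/* All objects translated; hand the transaction to the target side. */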
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @buffer:	buffer to be freed
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
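	/*
	 * Completing an async transaction unblocks the next async work
	 * queued on the same node, if any; move it to the proc todo list.
	 */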
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, buffer, 0, false);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
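			/*
			 * An increment on handle 0 refers to the context
			 * manager; take the ref directly on its node so the
			 * ref is created on first use.
			 */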
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

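			/*
			 * Look the buffer up by its user address and mark it
			 * as being freed; this fails if the buffer was never
			 * returned to userspace or is already being freed.
			 */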
			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
						  proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
						  proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						  proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION",
						  target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie,
							  (u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
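			/* Find the delivered death notification matching this cookie. */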
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

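	/* Sleep without blocking the freezer; wake on new work or a signal. */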
	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and closing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
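	/*
	 * On success just free the fixup records; on failure also fput
	 * unprocessed files and close any fds already written to the buffer.
	 */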
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
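
	/*
	 * The leading BR_NOOP lets the "ptr - buffer == 4" check below
	 * detect that nothing beyond the filler has been returned yet.
	 */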
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
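			/*
			 * Work out the node's current strong/weak state and
			 * queue whatever BR_* commands are needed to bring
			 * userspace in sync with it.
			 */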
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done;
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

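		/* Install the transferred fds now that we run in the target process. */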
		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
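	/* Ask userspace to spawn another looper thread if we are below the limit. */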
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
4576
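/*
 * Flush and discard every work item still queued on @list. Each
 * undelivered item is cleaned up according to its type; transactions
 * that still have a sender waiting get a BR_DEAD_REPLY.
 */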
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

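/*
 * Look up the binder_thread for current in proc->threads (an rbtree keyed
 * by pid). If it is not found and @new_thread is non-NULL, initialize and
 * insert @new_thread instead. Must be called with the proc inner lock held.
 */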
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

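/*
 * Find current's binder_thread, allocating one on first contact with the
 * driver. The allocation is done outside the inner lock (kzalloc may
 * sleep), so the lookup is retried afterwards and the new struct is freed
 * if an entry already exists by then.
 */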
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

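/*
 * Tear down @thread on BINDER_THREAD_EXIT or process release: unhook it
 * from proc->threads, mark it dead, unwind its transaction stack, and send
 * BR_DEAD_REPLY to a sender still waiting on it. Returns the number of
 * transactions that were still active.
 */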
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * Take a ref on the proc so it survives after we remove this
	 * thread from proc->threads. The corresponding dec is when we
	 * actually free the thread in binder_free_thread().
	 */
	proc->tmp_ref++;
	/*
	 * Take a ref on this thread to ensure it survives while we are
	 * releasing it.
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from
	 * any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * waitqueue removal done by a concurrent epoll user (e.g. when the
	 * epoll file is closed): wait for an RCU grace period so any such
	 * waiter is done with the waitqueue before the thread is freed.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

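/*
 * poll() support: EPOLLIN means there is work pending for this thread (or
 * for the process, when the thread is available for proc work). The thread
 * is marked with BINDER_LOOPER_STATE_POLL so that teardown in
 * binder_thread_release() knows to wake pollers with POLLFREE.
 */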
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

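/*
 * Handle BINDER_WRITE_READ: copy in a struct binder_write_read, consume
 * the write buffer first (commands from user space), then fill the read
 * buffer (returns to user space), and copy the updated consumed counts
 * back out. A failed write aborts the read with read_consumed = 0.
 */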
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

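/*
 * Install the calling process as the context manager (handle 0) for this
 * binder context. Only one manager may be registered at a time, the LSM
 * hook must allow it, and if a manager uid was configured earlier it must
 * match the caller's euid.
 */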
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

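/*
 * Top-level ioctl dispatcher. Every command first resolves (or creates)
 * the binder_thread for the caller; on the way out, looper_need_return is
 * cleared so a looping thread can go back to waiting.
 */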
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

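/*
 * Map the binder buffer area into the caller. The mapping must come from
 * the thread group leader that opened the device and may never be writable
 * (FORBIDDEN_MMAP_FLAGS plus clearing VM_MAYWRITE enforce this); page
 * management is handed off to binder_alloc.
 */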
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

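/*
 * Set up a binder_proc for the opening process, resolve which binder
 * device/context this file refers to (a binderfs inode or a misc device),
 * and create the per-pid debugfs/binderfs log entries.
 */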
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);

	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is
		 * shared between contexts. If the file has already been
		 * created for a process, the following
		 * binderfs_create_file() call will fail with -EEXIST if
		 * another context of the same process invoked
		 * binder_open(). This is ok since, as with debugfs, the
		 * log file will contain information on all contexts of a
		 * given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			if (error != -EEXIST) {
				pr_warn("Unable to create file %s in binderfs (error %d)\n",
					strbuf, error);
			}
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

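/*
 * Release @node because its owning process is going away. If nothing else
 * references it, the node is freed outright; otherwise it is moved onto
 * binder_dead_nodes and BINDER_WORK_DEAD_BINDER is queued for every ref
 * that requested a death notification. Returns the updated ref count used
 * by binder_deferred_release() for its statistics.
 */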
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

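/*
 * Final teardown of a binder_proc after its file has been released:
 * unregister it, drop a possible context-manager node, release all
 * threads, nodes, and refs, then flush any work still queued.
 */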
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc,
					   deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

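/*
 * Queue deferred work (flush/release) for @proc and kick the shared work
 * item. binder_deferred_func() drains binder_deferred_list one proc at a
 * time until it is empty.
 */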
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temp reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

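/*
 * Dump the transaction log ring buffer, oldest entry first. log->cur is a
 * free-running index; once the log has wrapped (log->full), all
 * ARRAY_SIZE(log->entry) slots are printed starting just past the newest
 * entry.
 */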
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

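/*
 * Module init: set up the allocator shrinker, the debugfs entries, the
 * misc devices named in the "devices" parameter (skipped when binderfs is
 * enabled), and binderfs itself. Any device created before a failure is
 * torn down again on the error paths.
 */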
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");