/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
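
/*
 * Worked example of the slot math above (illustrative only, assuming 4K
 * pages and the x86 value of UPROBE_XOL_SLOT_BYTES == 128):
 *
 *	UINSNS_PER_PAGE = 4096 / 128 = 32
 *
 * i.e. one XOL page holds 32 out-of-line instruction slots, so the
 * xol_area bitmap below fits in a single unsigned long on 64-bit.
 */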

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was removed.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
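
/*
 * Worked example for the two helpers above (illustrative numbers, assuming
 * 4K pages): a vma with vm_pgoff == 0x10 and vm_start == 0x400000 maps
 * file offset 0x10000 to vaddr 0x400000.  A probe at file offset 0x10123
 * then lives at:
 *
 *	vaddr = 0x400000 + 0x10123 - (0x10 << 12) = 0x400123
 *
 * and vaddr_to_offset() inverts this exactly.
 */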

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
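
/*
 * Worked example for the copy_insn() loop above (illustrative, assuming
 * 4K pages and a 16-byte arch.insn buffer): for a probe at file offset
 * 0xffe the first iteration copies PAGE_SIZE - 0xffe = 2 bytes from the
 * first page, the second copies the remaining 14 bytes from the next
 * page, so an instruction image straddling a page boundary is read whole.
 */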

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	iput(uprobe->inode);
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);

	/* TODO : cant unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
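
/*
 * Minimal usage sketch for the API above (illustrative only, not part of
 * this file; "my_handler", "my_consumer" and the probed inode/offset are
 * made-up names):
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;	// keep the breakpoint installed
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *	};
 *
 *	// inode:offset identify the probed instruction in the file:
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */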

/*
 * uprobe_apply - add or remove the breakpoints for an already
 * registered probe, according to @add.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (area->vaddr & ~PAGE_MASK) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
 fail:
	up_write(&mm->mmap_sem);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
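
/*
 * Example of the arithmetic above (illustrative, x86 numbers): int3 is a
 * one-byte trap, so UPROBE_SWBP_INSN_SIZE == 1 and after the trap the
 * saved ip points one byte past the breakpoint; for a probe at 0x400123,
 * instruction_pointer(regs) reads 0x400124 and this helper maps it back
 * to 0x400123.
 */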

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * Current area->vaddr notion assume the trampoline address is always
 * equal area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	area = current->mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep trampoline address in stack, rather keep the
	 * original return address of first caller thru all the consequent
	 * instances. This also makes breakpoint unwrapping easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc, The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task.  'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %pf()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}

static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_register() or we can't
			 * find the breakpoint any more: this vaddr is no
			 * longer ours.  Reset the instruction pointer so the
			 * task restarts the (possibly restored) original insn.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with the smp_wmb() in prepare_uprobe() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
__initcall(init_uprobes);