/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
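
/*
 * For instance, with 4 KiB pages and the 128-byte XOL slot size used on
 * x86, UINSNS_PER_PAGE works out to 32 slots per page (illustrative
 * numbers; both values are architecture and config dependent).
 */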

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was removed.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
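
/*
 * Worked example with illustrative values (not taken from this file):
 * a vma with vm_start = 0x400000 mapping the file from page 0x10
 * (vm_pgoff = 0x10), 4 KiB pages.  For file offset 0x10234:
 *
 *	offset_to_vaddr() = 0x400000 + 0x10234 - (0x10 << 12) = 0x400234
 *	vaddr_to_offset(0x400234) = (0x10 << 12) + 0x234 = 0x10234
 *
 * i.e. the two helpers are exact inverses for addresses inside the vma.
 */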

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the page we are replacing by @new_page
 * @new_page: the modified page we replace @old_page by
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails, see
 * find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node);	/* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion
			 * through reclaim. This is optimistic, no harm done
			 * if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO : cant unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
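
/*
 * Typical usage, as a sketch (hypothetical consumer, not part of this
 * file): a client embeds a struct uprobe_consumer, sets ->handler and/or
 * ->ret_handler, and registers it against an inode:offset pair:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;	(or UPROBE_HANDLER_REMOVE to drop the probe)
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * where @inode names the executable or library and @offset is the file
 * offset of the probed instruction.
 */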

/*
 * uprobe_apply - add or remove the breakpoints of an already registered
 * probe, according to @add.
 * @inode: the file in which the probe is placed.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmu_notifier? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (area->vaddr & ~PAGE_MASK) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area);
 fail:
	up_write(&mm->mmap_sem);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area);
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
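
/*
 * Slot-to-address math, with illustrative numbers: assuming a 128-byte
 * UPROBE_XOL_SLOT_BYTES and an area at 0x1000000, winning slot_nr 3
 * yields slot_addr 0x1000000 + 3 * 128 = 0x1000180.
 */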

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture is like MIPS it can override this function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
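
/*
 * On x86, for instance, the breakpoint (int3) is a single byte, so after
 * the trap the ip points one byte past the probed instruction, and
 * subtracting UPROBE_SWBP_INSN_SIZE (1) recovers its address.
 */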

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * Current area->vaddr notion assume the trampoline address is always
 * equal area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area);
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep trampoline address in stack, rather keep the
	 * original return address of first caller thru all the consequent
	 * instances. This also makes breakpoint unwrapping easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc, The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Keeping MMF_HAS_UPROBES set in that case is harmless,
		 * it only means an extra recalc later.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task.  'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %pf()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}
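
/*
 * Note on the 'remove' accumulator above, as an illustration: it starts
 * as UPROBE_HANDLER_REMOVE and is AND-ed with every ->handler() return
 * value, so the breakpoint is unapplied from current->mm only when all
 * consumers ask for removal; any single handler returning 0 keeps the
 * probe armed.
 */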

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
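
/*
 * Illustration (hypothetical list): with return_instances
 * C(chained) -> B(chained) -> A(!chained) -> D -> ..., the chain
 * [C, B, A] shares A's original return address; find_next_ret_chain(C)
 * skips over it and returns D, the head of the next chain.
 */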

static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
					struct pt_regs *regs)
{
	return true;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with wmb() in prepare_uprobe() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
__initcall(init_uprobes);