1
2
3
4
5#include <linux/module.h>
6#include <linux/mutex.h>
7#include <linux/types.h>
8#include <linux/jhash.h>
9#include <linux/list.h>
10#include <linux/rcupdate.h>
11#include <linux/tracepoint.h>
12#include <linux/err.h>
13#include <linux/slab.h>
14#include <linux/sched/signal.h>
15#include <linux/sched/task.h>
16#include <linux/static_key.h>
17
/*
 * Number of probe functions attached to a tracepoint, saturating at
 * "N" (more than two).  Used by the add/remove paths to pick the
 * correct static-call and RCU transition handling.
 */
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};
24
/* Linker-provided bounds of the built-in tracepoint pointer section. */
extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

/* SRCU domain protecting probe callbacks that may sleep. */
DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
30
/*
 * Index of the grace-period snapshot slot used for a given probe-count
 * transition: 1->0->1 and N->2->1 sequences each get their own slot.
 */
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};
37
/*
 * RCU and SRCU grace-period cookies captured when a probe-count
 * transition is armed.
 * @rcu:     cookie from get_state_synchronize_rcu().
 * @srcu:    cookie from start_poll_synchronize_srcu().
 * @ongoing: true while armed and not yet consumed by tp_rcu_cond_sync().
 */
struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu;
	bool ongoing;
};

/* One slot per transition type; NOTE(review): presumably serialized by tracepoints_mutex — all visible callers hold it. */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
46
/* Arm transition @sync: capture the current RCU and SRCU grace-period state. */
static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot; overwriting an armed slot is fine. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}
56
/*
 * If transition @sync is armed, wait (only if needed) until both the RCU
 * and SRCU grace periods captured by tp_rcu_get_state() have elapsed,
 * then disarm the slot.  No-op when nothing is armed.
 */
static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}
68
69
/* Compile-time debug switch: non-zero makes probe changes print the array. */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Protects tracepoint_module_list and serializes the module notifier
 * callbacks below.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Modules that contain tracepoints, linked via struct tp_module::list. */
static LIST_HEAD(tracepoint_module_list);
#endif

/*
 * Serializes all probe register/unregister operations on tracepoints
 * (see the register/unregister entry points below).
 */
static DEFINE_MUTEX(tracepoints_mutex);

/* Probe arrays released before call_rcu() is usable are parked here. */
static struct rcu_head *early_probes;
/* Set once RCU/SRCU deferred freeing is available (see release_early_probes()). */
static bool ok_to_free_tracepoints;

/*
 * Probe array plus the rcu_head used to defer freeing it.  The probes[]
 * flexible array is terminated by an entry with .func == NULL.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};
100
101
/*
 * Do-nothing placeholder written over a removed probe's slot when
 * func_remove() cannot allocate a replacement array.
 */
static void tp_stub_func(void)
{
}
106
/*
 * Allocate a struct tp_probes with room for @count probe slots and
 * return a pointer to its probes[] array (the embedded rcu_head sits
 * just before it; release_probes() recovers it with container_of()).
 * Returns NULL on allocation failure.
 */
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}
113
/* Final SRCU callback: actually free the tp_probes embedding @head. */
static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}
118
/*
 * Sched-RCU callback: chain into an SRCU callback so the array is only
 * freed after BOTH an RCU and an SRCU grace period have elapsed.
 */
static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}
123
/*
 * Once deferred freeing is usable, flush the probe arrays that were
 * parked on early_probes during boot and allow direct call_rcu()
 * freeing from then on.
 */
static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall time, so run after that. */
postcore_initcall(release_early_probes);
141
/*
 * Hand an old probe array (as returned by func_add()/func_remove())
 * back for deferred freeing once all RCU and SRCU readers are done
 * with it.  @old may be NULL, in which case nothing is done.
 */
static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		/* Recover the enclosing allocation; see allocate_probes(). */
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * Can't queue RCU callbacks until the RCU/SRCU machinery is
		 * ready; park the array on early_probes instead and let
		 * release_early_probes() flush it later.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Probes are protected by both regular RCU and SRCU.  By
		 * chaining the SRCU callback from inside the RCU callback
		 * (rcu_free_old_probes -> srcu_free_old_probes) the free
		 * waits for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}
167
168static void debug_print_probes(struct tracepoint_func *funcs)
169{
170 int i;
171
172 if (!tracepoint_debug || !funcs)
173 return;
174
175 for (i = 0; funcs[i].func; i++)
176 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
177}
178
/*
 * Build a new probe array containing the old entries plus @tp_func,
 * inserted before the first entry of strictly lower priority (FIFO
 * among equal priorities).  Stale tp_stub_func entries left behind by
 * failed removals are dropped along the way.  On success *funcs points
 * at the new array.
 *
 * Returns the old array (caller must pass it to release_probes()),
 * ERR_PTR(-EEXIST) if the same (func, data) pair is already present,
 * ERR_PTR(-ENOMEM) on allocation failure, or ERR_PTR(-EINVAL) for a
 * NULL probe function.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterator over the old array. */
	int nr_probes = 0;	/* Count of live (non-stub) probes. */
	int pos = -1;		/* Insertion position in the new array. */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* Count live probes and reject duplicates. */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub entries. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2: one slot for the new probe, one for the NULL terminator. */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Reserve a slot before the first lower-prio entry. */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now indexes the terminator slot. */
	} else {
		pos = 0;
		nr_probes = 1; /* Terminator goes right after the new probe. */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;	/* NULL-func terminator. */
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
231
/*
 * Build a new probe array with every (func, data) match of @tp_func
 * removed, also dropping stale tp_stub_func entries.  A NULL
 * tp_func->func removes all probes (counts stay 0, so *funcs becomes
 * NULL).  If the replacement array cannot be allocated, matching
 * entries are stubbed out in place with tp_stub_func and the old array
 * is kept installed (caller detects this via *funcs == return value).
 *
 * Returns the old array for release_probes(), or ERR_PTR(-ENOENT) when
 * no array is installed.
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);

	/* Count total entries and how many will be deleted. */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If tp_func->func is NULL, nr_probes == nr_del == 0 and the whole
	 * array is removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0: nothing survives. */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;

		/* N -> M (M > 0); + 1 for the NULL terminator. */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Allocation failed: can't shrink the array, so
			 * overwrite the matching entries with the stub.
			 * WRITE_ONCE pairs with lockless readers iterating
			 * the array.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
293
294
295
296
297static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
298{
299 if (!tp_funcs)
300 return TP_FUNC_0;
301 if (!tp_funcs[1].func)
302 return TP_FUNC_1;
303 if (!tp_funcs[2].func)
304 return TP_FUNC_2;
305 return TP_FUNC_N;
306}
307
/*
 * Point the tracepoint's static call at the code to run: directly at
 * the probe when exactly one is attached, otherwise at the iterator,
 * which walks tp->funcs.
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Nothing to patch when this tracepoint has no static call key. */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
319
320
321
322
/*
 * Add a probe function to a tracepoint.  Called with tracepoints_mutex
 * held (asserted by rcu_dereference_protected() below).  When @warn is
 * false, an -EEXIST result is returned silently instead of warning.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* Run the registration hook once, on the disabled -> enabled edge. */
	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		/* Only warn when asked to, and never for plain -ENOMEM. */
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer() publishes the fully-initialized new array
	 * before readers can see the pointer.  The transition handling
	 * below depends on how many probes are now attached.
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure the new static-call target never observes stale
		 * data from a previous 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Point the static call directly at the single probe. */
		tracepoint_update_call(tp, tp_funcs);
		/* Publish array, then enable: both call paths handle NULL funcs. */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Switch the static call back to the iterator. */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * From here on the iterator is used, which always reads the
		 * current funcs array, so publishing afterwards is safe.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Arm a grace-period snapshot if the first entry's data
		 * changed, so a later N->...->2->1 transition can wait out
		 * readers that may still use the old data.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}
389
390
391
392
393
394
395
/*
 * Remove a probe function from a tracepoint.  Called with
 * tracepoints_mutex held (asserted by rcu_dereference_protected()
 * below).  Mirrors the transition handling in tracepoint_add_func().
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Couldn't allocate a new array; matches were stubbed in place. */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function: run the unregistration hook. */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Switch the static call back to the iterator. */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs. */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Arm a grace-period snapshot so a later 0->1 transition
		 * can make sure the new static-call target never observes
		 * this (freed) data.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Before pointing the static call straight at the surviving
		 * probe, make sure no reader can pair the new function with
		 * old data: arm the snapshot if the first entry's data
		 * changed, then wait out any armed N->...->2->1 snapshot.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Point the static call directly at the single probe. */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Arm the snapshot if the first entry's data changed, for
		 * the eventual N->...->2->1 transition.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}
461
462
463
464
465
466
467
468
469
470
471
472int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
473 void *data, int prio)
474{
475 struct tracepoint_func tp_func;
476 int ret;
477
478 mutex_lock(&tracepoints_mutex);
479 tp_func.func = probe;
480 tp_func.data = data;
481 tp_func.prio = prio;
482 ret = tracepoint_add_func(tp, &tp_func, prio, false);
483 mutex_unlock(&tracepoints_mutex);
484 return ret;
485}
486EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
502 void *data, int prio)
503{
504 struct tracepoint_func tp_func;
505 int ret;
506
507 mutex_lock(&tracepoints_mutex);
508 tp_func.func = probe;
509 tp_func.data = data;
510 tp_func.prio = prio;
511 ret = tracepoint_add_func(tp, &tp_func, prio, true);
512 mutex_unlock(&tracepoints_mutex);
513 return ret;
514}
515EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
516
517
518
519
520
521
522
523
524
525
526
527
528
/**
 * tracepoint_probe_register - register a probe at the default priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data passed to the probe
 *
 * Convenience wrapper around tracepoint_probe_register_prio() using
 * TRACEPOINT_DEFAULT_PRIO.  Returns 0 on success, negative errno otherwise.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
534
535
536
537
538
539
540
541
542
543int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
544{
545 struct tracepoint_func tp_func;
546 int ret;
547
548 mutex_lock(&tracepoints_mutex);
549 tp_func.func = probe;
550 tp_func.data = data;
551 ret = tracepoint_remove_func(tp, &tp_func);
552 mutex_unlock(&tracepoints_mutex);
553 return ret;
554}
555EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
556
557static void for_each_tracepoint_range(
558 tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
559 void (*fct)(struct tracepoint *tp, void *priv),
560 void *priv)
561{
562 tracepoint_ptr_t *iter;
563
564 if (!begin)
565 return;
566 for (iter = begin; iter < end; iter++)
567 fct(tracepoint_ptr_deref(iter), priv);
568}
569
570#ifdef CONFIG_MODULES
571bool trace_module_has_bad_taint(struct module *mod)
572{
573 return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
574 (1 << TAINT_UNSIGNED_MODULE));
575}
576
/* Notifier chain fired when modules with tracepoints come and go. */
static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
578
579
580
581
582
583
584
585
586
587
588int register_tracepoint_module_notifier(struct notifier_block *nb)
589{
590 struct tp_module *tp_mod;
591 int ret;
592
593 mutex_lock(&tracepoint_module_list_mutex);
594 ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
595 if (ret)
596 goto end;
597 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
598 (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
599end:
600 mutex_unlock(&tracepoint_module_list_mutex);
601 return ret;
602}
603EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
604
605
606
607
608
609
610
611
612int unregister_tracepoint_module_notifier(struct notifier_block *nb)
613{
614 struct tp_module *tp_mod;
615 int ret;
616
617 mutex_lock(&tracepoint_module_list_mutex);
618 ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
619 if (ret)
620 goto end;
621 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
622 (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
623end:
624 mutex_unlock(&tracepoint_module_list_mutex);
625 return ret;
626
627}
628EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
629
630
631
632
633
/*
 * Sanity check run on each tracepoint of a departing module: warn if
 * any probe array is still attached (all probes should have been
 * unregistered by now).
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}
638
/*
 * Track a newly-loaded module that contains tracepoints: add it to
 * tracepoint_module_list and fire the COMING notifier chain.  Returns
 * 0 on success (including the skip cases) or -ENOMEM.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * Treat badly-tainted modules as if they had no tracepoints:
	 * returning 0 skips tracking without failing the module load.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
668
/*
 * Stop tracking a module that is being unloaded: fire the GOING
 * notifier chain, drop it from tracepoint_module_list, and verify all
 * of its tracepoints are quiescent.  A module not found on the list
 * (e.g. skipped at COMING time due to taint or allocation failure) is
 * silently ignored.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence: probe users are expected to have
			 * torn down in their notifier callbacks.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * Falling through without a match is fine: the module was never
	 * tracked (bad taint or earlier -ENOMEM).
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
701
702static int tracepoint_module_notify(struct notifier_block *self,
703 unsigned long val, void *data)
704{
705 struct module *mod = data;
706 int ret = 0;
707
708 switch (val) {
709 case MODULE_STATE_COMING:
710 ret = tracepoint_module_coming(mod);
711 break;
712 case MODULE_STATE_LIVE:
713 break;
714 case MODULE_STATE_GOING:
715 tracepoint_module_going(mod);
716 break;
717 case MODULE_STATE_UNFORMED:
718 break;
719 }
720 return notifier_from_errno(ret);
721}
722
/* Module notifier hooked into the module loader at init_tracepoints(). */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
727
728static __init int init_tracepoints(void)
729{
730 int ret;
731
732 ret = register_module_notifier(&tracepoint_module_nb);
733 if (ret)
734 pr_warn("Failed to register tracepoint module enter notifier\n");
735
736 return ret;
737}
738__initcall(init_tracepoints);
739#endif
740
741
742
743
744
745
/**
 * for_each_kernel_tracepoint - iterate over all built-in (non-module)
 * tracepoints
 * @fct: callback invoked for each tracepoint
 * @priv: opaque data passed through to @fct
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
753
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/*
 * Number of active syscall tracepoint registrations.  NOTE(review):
 * presumably serialized by tracepoints_mutex via the reg/unreg hooks
 * below — no local locking here; confirm against callers.
 */
static int sys_tracepoint_refcount;
758
/*
 * Registration hook for syscall tracepoints: on the first registration,
 * set the SYSCALL_TRACEPOINT work flag on every existing task so their
 * syscalls go through the tracing slow path.  Always returns 0.
 */
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}
774
/*
 * Unregistration hook for syscall tracepoints: when the last
 * registration goes away, clear the SYSCALL_TRACEPOINT work flag on
 * every task so syscalls return to the fast path.
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
788#endif
789