// SPDX-License-Identifier: GPL-2.0
/*
 * event probes
 *
 * Part of this code was copied from kernel/trace/trace_kprobe.c written by
 * Masami Hiramatsu <mhiramat@kernel.org>
 *
 * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
 * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov tz.stoyanov@gmail.com>
 *
 */
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/ftrace.h>
15
16#include "trace_dynevent.h"
17#include "trace_probe.h"
18#include "trace_probe_tmpl.h"
19
20#define EPROBE_EVENT_SYSTEM "eprobes"
21
/*
 * An event probe (eprobe) attaches to an existing trace event and records
 * selected fields of that event as a new dynamic event.
 */
struct trace_eprobe {
	/* tracepoint system of the attached event (kstrdup'ed, owned) */
	const char *event_system;

	/* tracepoint event name of the attached event (kstrdup'ed, owned) */
	const char *event_name;

	/* event call of the attached event; holds a reference while set */
	struct trace_event_call *event;

	struct dyn_event	devent;
	struct trace_probe	tp;
};
34
/* Per-attachment state passed to the trigger that implements the eprobe */
struct eprobe_data {
	struct trace_event_file	*file;	/* file of the eprobe event itself */
	struct trace_eprobe	*ep;	/* owning eprobe */
};
39
40static int __trace_eprobe_create(int argc, const char *argv[]);
41
/*
 * Release everything owned by @ep: the trace_probe contents, the duplicated
 * system/event name strings, the reference on the attached event (if one was
 * taken) and finally @ep itself. Safe to call with a NULL or partially
 * initialized eprobe, which the error paths rely on.
 */
static void trace_event_probe_cleanup(struct trace_eprobe *ep)
{
	if (!ep)
		return;
	trace_probe_cleanup(&ep->tp);
	kfree(ep->event_name);
	kfree(ep->event_system);
	if (ep->event)
		trace_event_put_ref(ep->event);
	kfree(ep);
}
53
/* Convert an embedded dyn_event back to its containing trace_eprobe */
static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_eprobe, devent);
}
58
/* dyn_event 'create' callback: parse the raw command and build the eprobe */
static int eprobe_dyn_event_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_eprobe_create);
}
63
64static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
65{
66 struct trace_eprobe *ep = to_trace_eprobe(ev);
67 int i;
68
69 seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
70 trace_probe_name(&ep->tp));
71 seq_printf(m, " %s.%s", ep->event_system, ep->event_name);
72
73 for (i = 0; i < ep->tp.nr_args; i++)
74 seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
75 seq_putc(m, '\n');
76
77 return 0;
78}
79
static int unregister_trace_eprobe(struct trace_eprobe *ep)
{
	/* If other probes are on the event, just unregister this eprobe */
	if (trace_probe_has_sibling(&ep->tp))
		goto unreg;

	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&ep->tp))
		return -EBUSY;

	/* Will fail to unregister if the event call is still in use */
	if (trace_probe_unregister_event_call(&ep->tp))
		return -EBUSY;

unreg:
	dyn_event_remove(&ep->devent);
	trace_probe_unlink(&ep->tp);

	return 0;
}
100
/* dyn_event 'free' callback: unregister the eprobe, then free it */
static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int err;

	err = unregister_trace_eprobe(ep);
	if (err)
		return err;

	trace_event_probe_cleanup(ep);
	return 0;
}
110
111static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
112{
113 struct trace_eprobe *ep = to_trace_eprobe(ev);
114
115 return trace_probe_is_enabled(&ep->tp);
116}
117
/*
 * dyn_event 'match' callback, used when removing events.
 *
 * The eprobe is selected by group (optional) and event name; an optional
 * first argument of the form SYSTEM/EVENT or SYSTEM.EVENT additionally
 * requires a match against the event this probe is attached to, and any
 * further arguments must match the probe's fetch arguments exactly.
 */
static bool eprobe_dyn_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	const char *slash;

	/* If a system (group) was given, it must match this probe's group */
	if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
		return false;

	/* The probe name must match the given event name */
	if (strcmp(trace_probe_name(&ep->tp), event) != 0)
		return false;

	/* No further constraints given: it is a match */
	if (argc < 1)
		return true;

	/*
	 * First extra argument selects the attached event, either as
	 * SYSTEM/EVENT or SYSTEM.EVENT.
	 */
	slash = strchr(argv[0], '/');
	if (!slash)
		slash = strchr(argv[0], '.');
	if (!slash)
		return false;

	if (strncmp(ep->event_system, argv[0], slash - argv[0]))
		return false;
	if (strcmp(ep->event_name, slash + 1))
		return false;

	argc--;
	argv++;

	/* Any remaining arguments must fully match the fetch arguments */
	if (argc < 1)
		return true;

	return trace_probe_match_command_args(&ep->tp, argc, argv);
}
175
/* Operations hooking eprobes into the dynamic-event framework */
static struct dyn_event_operations eprobe_dyn_event_ops = {
	.create = eprobe_dyn_event_create,
	.show = eprobe_dyn_event_show,
	.is_busy = eprobe_dyn_event_is_busy,
	.free = eprobe_dyn_event_release,
	.match = eprobe_dyn_event_match,
};
183
/*
 * Allocate and initialize a trace_eprobe for @event with room for @nargs
 * fetch arguments.
 *
 * Takes ownership of the caller's reference on @event: on success the
 * reference is kept in ep->event; on any failure it is dropped (either
 * explicitly here or via trace_event_probe_cleanup()).
 *
 * Returns the new eprobe or an ERR_PTR (-ENODEV if @event is NULL,
 * -ENOMEM on allocation failure, or the trace_probe_init() error).
 */
static struct trace_eprobe *alloc_event_probe(const char *group,
					      const char *this_event,
					      struct trace_event_call *event,
					      int nargs)
{
	struct trace_eprobe *ep;
	const char *event_name;
	const char *sys_name;
	int ret = -ENOMEM;

	if (!event)
		return ERR_PTR(-ENODEV);

	sys_name = event->class->system;
	event_name = trace_event_name(event);

	ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
	if (!ep) {
		/* ep is NULL here, so the ref must be dropped explicitly */
		trace_event_put_ref(event);
		goto error;
	}
	ep->event = event;
	ep->event_name = kstrdup(event_name, GFP_KERNEL);
	if (!ep->event_name)
		goto error;
	ep->event_system = kstrdup(sys_name, GFP_KERNEL);
	if (!ep->event_system)
		goto error;

	ret = trace_probe_init(&ep->tp, this_event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
	return ep;
error:
	/* Handles a NULL or partially initialized ep */
	trace_event_probe_cleanup(ep);
	return ERR_PTR(ret);
}
223
/*
 * Resolve fetch argument @i of @ep against the fields of the attached event.
 *
 * After parsing, code->data holds the field *name* as an allocated string;
 * on success that string is freed and code->data is repurposed to point at
 * the matching ftrace_event_field. Returns -ENOENT (and clears code->data)
 * if the attached event has no field of that name.
 */
static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
{
	struct probe_arg *parg = &ep->tp.args[i];
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(ep->event);
	list_for_each_entry(field, head, link) {
		if (!strcmp(parg->code->data, field->name)) {
			kfree(parg->code->data);
			/* code->data now points at the field, not a string */
			parg->code->data = field;
			return 0;
		}
	}
	kfree(parg->code->data);
	parg->code->data = NULL;
	return -ENOENT;
}
242
243static int eprobe_event_define_fields(struct trace_event_call *event_call)
244{
245 struct eprobe_trace_entry_head field;
246 struct trace_probe *tp;
247
248 tp = trace_probe_primary_from_call(event_call);
249 if (WARN_ON_ONCE(!tp))
250 return -ENOENT;
251
252 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
253}
254
/* Fields are defined dynamically per probe via the callback above */
static struct trace_event_fields eprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = eprobe_event_define_fields },
	{}
};
260
261
/*
 * Output formatter for an eprobe event. Prints
 * "NAME: (SYSTEM.EVENT) arg1=... arg2=..." — falling back to the raw
 * event type number if the attached event can no longer be found.
 */
static enum print_line_t
print_eprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct eprobe_trace_entry_head *field;
	struct trace_event_call *pevent;
	struct trace_event *probed_event;
	struct trace_seq *s = &iter->seq;
	struct trace_eprobe *ep;
	struct trace_probe *tp;
	unsigned int type;

	field = (struct eprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	ep = container_of(tp, struct trace_eprobe, tp);
	type = ep->event->event.type;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	/* Look up the attached event to print its system and name */
	probed_event = ftrace_find_event(type);
	if (probed_event) {
		pevent = container_of(probed_event, struct trace_event_call, event);
		trace_seq_printf(s, "%s.%s", pevent->class->system,
				 trace_event_name(pevent));
	} else {
		trace_seq_printf(s, "%u", type);
	}

	trace_seq_putc(s, ')');

	/* Argument data is stored right after the entry header */
	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
304
305static unsigned long get_event_field(struct fetch_insn *code, void *rec)
306{
307 struct ftrace_event_field *field = code->data;
308 unsigned long val;
309 void *addr;
310
311 addr = rec + field->offset;
312
313 switch (field->size) {
314 case 1:
315 if (field->is_signed)
316 val = *(char *)addr;
317 else
318 val = *(unsigned char *)addr;
319 break;
320 case 2:
321 if (field->is_signed)
322 val = *(short *)addr;
323 else
324 val = *(unsigned short *)addr;
325 break;
326 case 4:
327 if (field->is_signed)
328 val = *(int *)addr;
329 else
330 val = *(unsigned int *)addr;
331 break;
332 default:
333 if (field->is_signed)
334 val = *(long *)addr;
335 else
336 val = *(unsigned long *)addr;
337 break;
338 }
339 return val;
340}
341
342static int get_eprobe_size(struct trace_probe *tp, void *rec)
343{
344 struct probe_arg *arg;
345 int i, len, ret = 0;
346
347 for (i = 0; i < tp->nr_args; i++) {
348 arg = tp->args + i;
349 if (unlikely(arg->dynamic)) {
350 unsigned long val;
351
352 val = get_event_field(arg->code, rec);
353 len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
354 if (len > 0)
355 ret += len;
356 }
357 }
358
359 return ret;
360}
361
362
363
364
/*
 * Kprobe specific fetch functions (template callback).
 * Note that we don't verify the fetched value, since the data does not
 * come from user space — it is a field of an already-recorded event.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	unsigned long val;

	val = get_event_field(code, rec);
	return process_fetch_insn_bottom(code + 1, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
375
376
/*
 * Return the length of a user-space string at @addr (bounded by
 * MAX_STRING_SIZE), as reported by strnlen_user_nofault(), or a
 * negative error.
 */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	const void __user *uaddr =  (__force const void __user *)addr;

	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}
384
385
386static nokprobe_inline int
387fetch_store_strlen(unsigned long addr)
388{
389 int ret, len = 0;
390 u8 c;
391
392#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
393 if (addr < TASK_SIZE)
394 return fetch_store_strlen_user(addr);
395#endif
396
397 do {
398 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
399 len++;
400 } while (c && ret == 0 && len < MAX_STRING_SIZE);
401
402 return (ret < 0) ? ret : len;
403}
404
405
406
407
408
/*
 * Fetch a NUL-terminated string from user space into the dynamic data
 * area. @dest initially holds the data_loc word whose length part
 * (get_loc_len) bounds the copy; on success it is rewritten via
 * make_data_loc() with the actual length and offset from @base.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr = (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
428
429
430
431
432
/*
 * Fetch a NUL-terminated string from kernel space into the dynamic data
 * area, delegating to fetch_store_string_user() for user addresses on
 * architectures with non-overlapping address spaces. Same data_loc
 * protocol as fetch_store_string_user().
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Non-faulting copy: the source may become unmapped or change
	 * while we are probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
460
461static nokprobe_inline int
462probe_mem_read_user(void *dest, void *src, size_t size)
463{
464 const void __user *uaddr = (__force const void __user *)src;
465
466 return copy_from_user_nofault(dest, uaddr, size);
467}
468
/*
 * Non-faulting read of @size bytes, dispatching to the user-space
 * variant for low addresses on non-overlapping address space archs.
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}
478
479
/*
 * Record one occurrence of the probed event as an eprobe event.
 * @rec is the raw record of the event the probe is attached to; its
 * fields are re-fetched and stored after the eprobe entry header.
 */
static inline void
__eprobe_trace_func(struct eprobe_data *edata, void *rec)
{
	struct eprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != edata->file->event_call))
		return;

	if (trace_trigger_soft_disabled(edata->file))
		return;

	/* Dynamic args need extra space beyond the fixed tp.size */
	dsize = get_eprobe_size(&edata->ep->tp, rec);

	entry = trace_event_buffer_reserve(&fbuffer, edata->file,
					   sizeof(*entry) + edata->ep->tp.size + dsize);

	if (!entry)
		return;

	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	/* Argument data goes right after the entry header */
	store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
507
/*
 * The event probe implementation uses event triggers to get access to the
 * event it is attached to, but is not an actual trigger. The functions
 * below are stubs that only satisfy the event_trigger_ops / event_command
 * interfaces.
 */

/* Stub: eprobe triggers need no per-trigger initialization */
static int eprobe_trigger_init(struct event_trigger_data *data)
{
	return 0;
}
518
/* Stub: trigger memory is freed by disable_eprobe(), not by this callback */
static void eprobe_trigger_free(struct event_trigger_data *data)
{

}
523
/* Stub: eprobe triggers are not shown in the event's trigger file */
static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}
530
/* Trigger callback: fires on the attached event and records the eprobe */
static void eprobe_trigger_func(struct event_trigger_data *data,
				struct trace_buffer *buffer, void *rec,
				struct ring_buffer_event *rbe)
{
	struct eprobe_data *edata = data->private_data;

	__eprobe_trace_func(edata, rec);
}
539
/* Trigger ops for eprobes; only .trigger does real work (rest are stubs) */
static struct event_trigger_ops eprobe_trigger_ops = {
	.trigger		= eprobe_trigger_func,
	.print			= eprobe_trigger_print,
	.init			= eprobe_trigger_init,
	.free			= eprobe_trigger_free,
};
546
/*
 * Stub: eprobe triggers cannot be created from the trigger file,
 * so parsing such a command always fails.
 */
static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter)
{
	return -1;
}
554
/* Stub: registration goes through enable_eprobe(), never this callback */
static int eprobe_trigger_reg_func(char *glob,
				   struct event_trigger_data *data,
				   struct trace_event_file *file)
{
	return -1;
}
561
/* Stub: unregistration goes through disable_eprobe(), never this callback */
static void eprobe_trigger_unreg_func(char *glob,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{

}
568
/* All eprobe triggers share the same ops regardless of cmd/param */
static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
							char *param)
{
	return &eprobe_trigger_ops;
}
574
/*
 * Command descriptor referenced by eprobe triggers so they carry valid
 * cmd_ops. Its parse/reg callbacks fail, as eprobes are not managed
 * through the trigger file interface.
 */
static struct event_command event_trigger_cmd = {
	.name			= "eprobe",
	.trigger_type		= ETT_EVENT_EPROBE,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= eprobe_trigger_cmd_parse,
	.reg			= eprobe_trigger_reg_func,
	.unreg			= eprobe_trigger_unreg_func,
	.unreg_all		= NULL,
	.get_trigger_ops	= eprobe_trigger_get_ops,
	.set_filter		= NULL,
};
586
587static struct event_trigger_data *
588new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
589{
590 struct event_trigger_data *trigger;
591 struct eprobe_data *edata;
592
593 edata = kzalloc(sizeof(*edata), GFP_KERNEL);
594 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
595 if (!trigger || !edata) {
596 kfree(edata);
597 kfree(trigger);
598 return ERR_PTR(-ENOMEM);
599 }
600
601 trigger->flags = EVENT_TRIGGER_FL_PROBE;
602 trigger->count = -1;
603 trigger->ops = &eprobe_trigger_ops;
604
605
606
607
608
609
610 trigger->cmd_ops = &event_trigger_cmd;
611
612 INIT_LIST_HEAD(&trigger->list);
613 RCU_INIT_POINTER(trigger->filter, NULL);
614
615 edata->file = file;
616 edata->ep = ep;
617 trigger->private_data = edata;
618
619 return trigger;
620}
621
/*
 * Attach @ep to its target event: build a probe trigger and add it to the
 * target event's trigger list, enabling that event if needed.
 * @eprobe_file is the file of the eprobe event itself (used to record).
 */
static int enable_eprobe(struct trace_eprobe *ep,
			 struct trace_event_file *eprobe_file)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct trace_array *tr = eprobe_file->tr;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;
	trigger = new_eprobe_trigger(ep, eprobe_file);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	list_add_tail_rcu(&trigger->list, &file->triggers);

	trace_event_trigger_enable_disable(file, 1);
	update_cond_flag(file);

	return 0;
}
643
/* Output functions for the eprobe event (text formatting only) */
static struct trace_event_functions eprobe_funcs = {
	.trace		= print_eprobe_event
};
647
/*
 * Detach @ep from its target event: find and remove the probe trigger
 * this eprobe installed, disable the target event, and free the trigger
 * after an RCU-style synchronization.
 */
static int disable_eprobe(struct trace_eprobe *ep,
			  struct trace_array *tr)
{
	struct event_trigger_data *trigger = NULL, *iter;
	struct trace_event_file *file;
	struct eprobe_data *edata;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;

	/* Find the trigger belonging to this particular eprobe */
	list_for_each_entry(iter, &file->triggers, list) {
		if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
			continue;
		edata = iter->private_data;
		if (edata->ep == ep) {
			trigger = iter;
			break;
		}
	}
	if (!trigger)
		return -ENODEV;

	list_del_rcu(&trigger->list);

	trace_event_trigger_enable_disable(file, 0);
	update_cond_flag(file);

	/* Make sure nothing is using the edata or trigger anymore */
	tracepoint_synchronize_unregister();

	kfree(edata);
	kfree(trigger);

	return 0;
}
684
685static int enable_trace_eprobe(struct trace_event_call *call,
686 struct trace_event_file *file)
687{
688 struct trace_probe *pos, *tp;
689 struct trace_eprobe *ep;
690 bool enabled;
691 int ret = 0;
692
693 tp = trace_probe_primary_from_call(call);
694 if (WARN_ON_ONCE(!tp))
695 return -ENODEV;
696 enabled = trace_probe_is_enabled(tp);
697
698
699 if (file) {
700 ret = trace_probe_add_file(tp, file);
701 if (ret)
702 return ret;
703 } else
704 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
705
706 if (enabled)
707 return 0;
708
709 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
710 ep = container_of(pos, struct trace_eprobe, tp);
711 ret = enable_eprobe(ep, file);
712 if (ret)
713 break;
714 enabled = true;
715 }
716
717 if (ret) {
718
719 if (enabled)
720 disable_eprobe(ep, file->tr);
721 if (file)
722 trace_probe_remove_file(tp, file);
723 else
724 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
725 }
726
727 return ret;
728}
729
/*
 * Disable the eprobe event @call for @file (or for profiling when @file
 * is NULL). The eprobes are only detached from their target events once
 * no enabled user remains.
 */
static int disable_trace_eprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_eprobe *ep;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Other files still use this probe: keep it attached */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
			ep = container_of(pos, struct trace_eprobe, tp);
			disable_eprobe(ep, file->tr);
		}
	}

 out:
	if (file)
		/*
		 * Synchronization is done inside the function below;
		 * disable_eprobe() above already synchronized before
		 * freeing its trigger data.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
768
/*
 * Event class 'reg' callback: routes enable/disable requests for the
 * eprobe event. The perf cases return 0 without doing anything —
 * eprobes are not hooked up to perf here.
 */
static int eprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_eprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_eprobe(event, file);
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
791
/* Wire the eprobe's event call to the eprobe output/field/reg callbacks */
static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
{
	struct trace_event_call *call = trace_probe_event_call(&ep->tp);

	call->flags = TRACE_EVENT_FL_EPROBE;
	call->event.funcs = &eprobe_funcs;
	call->class->fields_array = eprobe_fields_array;
	call->class->reg = eprobe_register;
}
801
802static struct trace_event_call *
803find_and_get_event(const char *system, const char *event_name)
804{
805 struct trace_event_call *tp_event;
806 const char *name;
807
808 list_for_each_entry(tp_event, &ftrace_events, list) {
809
810 if (tp_event->flags &
811 (TRACE_EVENT_FL_IGNORE_ENABLE |
812 TRACE_EVENT_FL_KPROBE |
813 TRACE_EVENT_FL_UPROBE |
814 TRACE_EVENT_FL_EPROBE))
815 continue;
816 if (!tp_event->class->system ||
817 strcmp(system, tp_event->class->system))
818 continue;
819 name = trace_event_name(tp_event);
820 if (!name || strcmp(event_name, name))
821 continue;
822 if (!trace_event_try_get_ref(tp_event)) {
823 return NULL;
824 break;
825 }
826 return tp_event;
827 break;
828 }
829 return NULL;
830}
831
/*
 * Parse fetch argument @i from @argv and, if it references an event field
 * ($<field>, i.e. FETCH_OP_TP_ARG), resolve the field name against the
 * attached event via trace_eprobe_tp_arg_update().
 */
static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
{
	unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT;
	int ret;

	ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags);
	if (ret)
		return ret;

	if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
		ret = trace_eprobe_tp_arg_update(ep, i);

	return ret;
}
846
static int __trace_eprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *	e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS]
	 * Fetch args:
	 *	<name>=$<field>[:TYPE]
	 */
	const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
	const char *sys_event = NULL, *sys_name = NULL;
	struct trace_event_call *event_call;
	struct trace_eprobe *ep = NULL;
	char buf1[MAX_EVENT_NAME_LEN];
	char buf2[MAX_EVENT_NAME_LEN];
	int ret = 0;
	int i;

	if (argc < 2 || argv[0][0] != 'e')
		return -ECANCELED;

	trace_probe_log_init("event_probe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event) {
		event++;
		ret = traceprobe_parse_event_name(&event, &group, buf1,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		/* No explicit name given: derive it from the attached event */
		strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
		sanitize_event_name(buf1);
		event = buf1;
	}
	if (!is_good_name(event) || !is_good_name(group))
		goto parse_error;

	/* argv[1] is the "SYSTEM.EVENT" (or "SYSTEM/EVENT") to attach to */
	sys_event = argv[1];
	ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2,
					  sys_event - argv[1]);
	if (ret || !sys_name)
		goto parse_error;
	if (!is_good_name(sys_event) || !is_good_name(sys_name))
		goto parse_error;

	mutex_lock(&event_mutex);
	event_call = find_and_get_event(sys_name, sys_event);
	/* alloc_event_probe() takes over the event_call reference */
	ep = alloc_event_probe(group, event, event_call, argc - 2);
	mutex_unlock(&event_mutex);

	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		/* This must return -ENOMEM or a missing event, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
		ep = NULL;
		goto error;
	}

	argc -= 2; argv += 2;
	/* parse the fetch arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = trace_eprobe_tp_update_arg(ep, argv, i);
		if (ret)
			goto error;
	}
	ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
	if (ret < 0)
		goto error;
	init_trace_eprobe_call(ep);
	mutex_lock(&event_mutex);
	ret = trace_probe_register_event_call(&ep->tp);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		}
		mutex_unlock(&event_mutex);
		goto error;
	}
	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
	mutex_unlock(&event_mutex);
	return ret;
parse_error:
	ret = -EINVAL;
error:
	trace_event_probe_cleanup(ep);
	return ret;
}
936
/*
 * Register the eprobe dynamic-event operations early (core_initcall) so
 * that eprobe events can be set up from other early initcalls, before
 * tracefs is available.
 */
static __init int trace_events_eprobe_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&eprobe_dyn_event_ops);
	if (err)
		pr_warn("Could not register eprobe_dyn_event_ops\n");

	return err;
}
core_initcall(trace_events_eprobe_init_early);
952