/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Bit 31 of ref_count marks a dynamically allocated system name */
#define SYSTEM_FL_FREE_NAME		(1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}
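
/*
 * Iterate over every event file of every trace array. A use of these
 * macros must be closed with while_for_each_event_file(), which
 * supplies the closing brace of the outer loop.
 */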
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	/* Common fields are shared by all events; check those first */
	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
				  struct ftrace_event_file *ftrace_file,
				  unsigned long len)
{
	struct ftrace_event_call *event_call = ftrace_file->event_call;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	fbuffer->ftrace_file = ftrace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct ftrace_event_buffer *fbuffer)
{
	struct ftrace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->ftrace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->ftrace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
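
/*
 * __ftrace_event_enable_disable - enable or disable an event file
 *
 * When @soft_disable is set, the request comes from a trigger or a
 * function probe: it only toggles the SOFT_DISABLED/SOFT_MODE state
 * (tracked by the sm_ref counter) while keeping the tracepoint itself
 * registered, so the event can later be "soft enabled" by clearing
 * the SOFT_DISABLED bit without re-registering anything.
 */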
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the
		 * sm_ref reference counter is decremented. If it reaches 0,
		 * we want to clear the SOFT_DISABLED flag but leave the
		 * event in the state it was. That is, if the event was
		 * enabled and SOFT_DISABLED isn't set, then do nothing.
		 * But if the event is enabled and SOFT_DISABLED is set,
		 * clear SOFT_DISABLED and keep the event enabled.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * do not disable the tracepoint, otherwise "soft enable"s
		 * (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					ftrace_event_name(call));
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct ftrace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct ftrace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}
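
/*
 * Callers must hold event_mutex. Passing NULL for @match, @sub and
 * @event matches every event in the trace array.
 */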
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	const char *name;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = ftrace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
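
/* 128 should be much more than enough */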
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		/* A leading '!' means "disable" instead of "enable" */
		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", ftrace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & FTRACE_EVENT_FL_ENABLED &&
	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
	    flags & FTRACE_EVENT_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
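
/*
 * The subsystem "enable" file reports '0' if no event in the subsystem
 * is enabled, '1' if all are enabled, and 'X' for a mixture ('?' only
 * if the subsystem has no events at all).
 */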
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
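
/*
 * The "format" file iterates a virtual list: the header first, then
 * the common fields, a separator, the event's own fields, and finally
 * the print fmt line.
 */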
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", ftrace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type(except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);
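
/*
 * Opening a subsystem file must take a reference on both the
 * subsystem directory and its trace array, so neither can be freed
 * while the file is open.
 */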
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}
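
/*
 * Create a new subsystem descriptor. The name is only duplicated when
 * it does not live in core kernel data (i.e. it came from a module or
 * another dynamic caller); SYSTEM_FL_FREE_NAME then marks it for
 * kfree() on the final put.
 */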
static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	if (!core_kernel_data((unsigned long)name)) {
		system->ref_count |= SYSTEM_FL_FREE_NAME;
		system->name = kstrdup(name, GFP_KERNEL);
		if (!system->name)
			goto out_free;
	} else
		system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:

	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = ftrace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	trace_create_file("trigger", 0644, file->dir, file,
			  &event_trigger_fops);

	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = ftrace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}
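
/*
 * Replace the enum name of length @len at @ptr in a print fmt string
 * with its numeric value, shifting the remainder of the string down.
 * Returns a pointer just past the inserted value, or NULL if the
 * value needs more room than the name provided.
 */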
static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the enum value as a string */
	elen = snprintf(ptr, 0, "%ld", map->enum_value);

	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->enum_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}

static void update_event_printk(struct ftrace_event_call *call,
				struct trace_enum_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->enum_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->enum_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = enum_replace(ptr, map, len);
				/* Hmm, enum string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as enum_replace()
				 * returns the pointer to the character after
				 * the enum, and two enums can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}

void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct ftrace_event_call *call, *p;
	const char *last_system = NULL;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			last_i = 0;
			last_system = call->class->system;
		}

		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (!last_i)
					last_i = i;
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;
}

static int probe_remove_event_call(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on ftrace_event_enable_disable(enable => 0)
		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
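
/*
 * Modules bring their own array of event calls (mod->trace_events);
 * register them when the module comes in, tear them down when it goes.
 */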
#ifdef CONFIG_MODULES

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffers if the module
	 * being unloaded registered any events that were used:
	 * the buffers may still reference the module's data.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1,
};
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for all events of a tracer */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				ftrace_event_name(call));
	}
}

struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = ftrace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct ftrace_event_file	*file;
	unsigned long			count;
	int				ref;
	bool				enable;
};
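
/*
 * These probes hang off function tracer entries: when a traced
 * function is hit, they flip the SOFT_DISABLED bit of the target
 * event, soft enabling or disabling it without (un)registering the
 * tracepoint.
 */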
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   ftrace_event_name(data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};
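
/*
 * Parse "<system>:<event>[:count]" from a set_ftrace_filter command
 * and attach (or, with a leading '!' on the glob, detach) an event
 * enable/disable probe to the matching functions.
 */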
static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}

static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function creates the tracefs file structures
 * associated with those descriptors.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				ftrace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {

		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				ftrace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct ftrace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
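
/*
 * Events named on the kernel command line ("trace_event=<events>")
 * are stashed in bootup_event_buf and enabled during early boot.
 */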
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2488{
2489 int ret;
2490
2491 mutex_lock(&event_mutex);
2492
2493 ret = create_event_toplevel_files(parent, tr);
2494 if (ret)
2495 goto out_unlock;
2496
2497 down_write(&trace_event_sem);
2498 __trace_add_event_dirs(tr);
2499 up_write(&trace_event_sem);
2500
2501 out_unlock:
2502 mutex_unlock(&event_mutex);
2503
2504 return ret;
2505}
2506
2507
2508
2509
2510
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
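
/*
 * Tear down the event state of a trace array instance: disable all
 * of its events and triggers, then remove its event directories.
 */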
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Access to events are within rcu_read_lock_sched() */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}

static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
	return 0;
}

static __init void
early_enable_events(struct trace_array *tr, bool disable_first)
{
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		/* Restarting syscalls requires that we stop them first */
		if (disable_first)
			ftrace_set_clr_event(tr, token, 0);

		ret = ftrace_set_clr_event(tr, token, 1);
		if (ret)
			pr_warn("Failed to enable trace event: %s\n", token);

		/*
		 * strsep() replaced the ',' with '\0' and advanced buf
		 * past it; put the comma back so that the buffer can be
		 * parsed again on a later call.
		 */
		if (buf)
			*(buf - 1) = ',';
	}
}
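
/*
 * Initialize all statically defined events at early boot: run each
 * event's init routine, add it to the ftrace_events list and to the
 * top trace array, then enable anything listed on the "trace_event="
 * command line.
 */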
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}
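
/*
 * event_trace_enable() runs from trace_event_init(), before even
 * pid 1 exists. Some events, most notably the syscall tracepoints,
 * cannot actually start that early even though they were requested
 * on the command line. Running early_enable_events() again from an
 * early initcall (disabling first, then re-enabling) picks those up.
 */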
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, true);

	return 0;
}

early_initcall(event_trace_enable_again);

static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'available_events' entry\n");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields\n");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif
	return 0;
}

void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);
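
/*
 * Note the two-stage setup: trace_event_init() is called by the core
 * tracing initialization during early boot, before any filesystem
 * exists, while event_trace_init() runs later as an fs_initcall,
 * once tracefs can be populated.
 */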

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	/* Exercise spinlock, irq-disabling lock and mutex events */
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	/* Trigger a kmalloc/kfree pair for the kmem events */
	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
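
/*
 * For every trace event defined, we will test each trace point
 * separately, then by groups, and finally all trace points.
 */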
static __init void event_trace_self_tests(void)
{
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. This is the place to
 * do it if we ever want to in the future.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", ftrace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be performed.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* Disable everything again */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
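
/*
 * When the function tracer is configured in, run the event self
 * tests a second time with function tracing active, to verify that
 * events and the function tracer can coexist. The callback below
 * records a TRACE_FN entry for every function it traces.
 */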

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();

	/* Use a per-CPU counter to guard against recursion */
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif