// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers to console when there is an oops.
 * Set 2 if you want to dump only the buffer of the CPU that triggered
 * the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" overlays the "eval_string" member of struct
	 * trace_eval_map in the union below, and points to NULL
	 * to mark the end of one saved map array.
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
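/*
 * Example: booting with "ftrace=function" on the kernel command line
 * selects the function tracer before the tracefs files even exist.
 */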

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
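/*
 * Accepts either "ftrace_dump_on_oops" (dump all CPU buffers on oops)
 * or "ftrace_dump_on_oops=orig_cpu" (dump only the CPU that oopsed).
 */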

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
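/*
 * ns2usecs() rounds to the nearest microsecond: for example,
 * ns2usecs(1500) returns 2.
 */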

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

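/*
 * trace_array_get - take a reference on a trace array
 *
 * Returns 0 if @this_tr is currently on ftrace_trace_arrays and the
 * reference was taken, -ENODEV otherwise. Pair with trace_array_put().
 */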
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */
	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * used.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
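/*
 * trace_pid_write() backs pid-list tracefs files such as
 * "set_event_pid": writing "123 456" produces a new list with both
 * pids set. When the caller passes in the current list, its bits are
 * copied first, so the new pids are appended to the old ones.
 */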

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) are not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
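/*
 * A reader brackets its consuming reads, e.g.:
 *
 *	trace_access_lock(cpu);
 *	... ring_buffer_consume(...) ...
 *	trace_access_unlock(cpu);
 *
 * and may run concurrently with readers of other CPUs' buffers, while
 * RING_BUFFER_ALL_CPUS excludes them all.
 */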

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible '\n' added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
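/*
 * __trace_puts() and __trace_bputs() below sit behind the trace_puts()
 * macro: it picks __trace_bputs() when the string is a build-time
 * constant (only a pointer needs recording) and __trace_puts()
 * otherwise.
 */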

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
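/*
 * Typical use: call tracing_alloc_snapshot() once from a context that
 * may sleep, then call tracing_snapshot() from the (possibly atomic)
 * points of interest; each call swaps the live buffer into the
 * snapshot buffer, readable via the "snapshot" tracefs file.
 */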

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set with
	 * something that can never be NULL when we get here, to avoid
	 * racing with tracing_cond_snapshot_data().
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS (defined in trace.h) is a list of C(bit, string) pairs.
 * Redefining C(a, b) to b turns it into the list of option name
 * strings, in the same order as the trace_iterator_flags bits.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser hasn't finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
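/*
 * Example: a single write of "sched irq\n" is consumed over repeated
 * calls; the first call fills parser->buffer with "sched", the second
 * with "irq", and each returns the number of bytes it consumed.
 */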

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
static const struct file_operations tracing_max_lat_fops;

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
					      d_tracer, &tr->max_latency,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here
	 * because this may run with interrupts disabled or in hard
	 * interrupt context, so defer through an irq_work instead.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

/*
 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
 *  defined(CONFIG_FSNOTIFY)
 */
#else

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", 0644, d_tracer,	\
			  &tr->max_latency, &tracing_max_lat_fops)

#endif

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/*
		 * This loop can take minutes when sanitizers are enabled, so
		 * lets make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
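/*
 * A minimal sketch of how a tracer plugs in (names here are
 * illustrative, not taken from this file):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */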

static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}

static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}

static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task:  task to record
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev: previous task during sched_switch
 * @next: next task during sched_switch
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
			     unsigned long flags, int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->type = type;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
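/*
 * For example, an event emitted from hard interrupt context with
 * interrupts disabled ends up with both TRACE_FLAG_IRQS_OFF and
 * TRACE_FLAG_HARDIRQ set, which the output code renders in the
 * irqs-off and hardirq/softirq columns of the latency format.
 */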

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct trace_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->array_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event_file *file;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	file = fbuffer->trace_file;
	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, fbuffer->entry)))
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
					 fbuffer->event, fbuffer->entry,
					 fbuffer->flags, fbuffer->pc, fbuffer->regs);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}
2787
2788static inline void
2789add_trace_export(struct trace_export **list, struct trace_export *export)
2790{
2791 rcu_assign_pointer(export->next, *list);
	/*
	 * Publish the export only after its ->next pointer is set up,
	 * so that a reader walking the list concurrently always sees
	 * a consistent chain. The two rcu_assign_pointer() calls
	 * provide the required ordering.
	 */
2798 rcu_assign_pointer(*list, export);
2799}
2800
2801static inline int
2802rm_trace_export(struct trace_export **list, struct trace_export *export)
2803{
2804 struct trace_export **p;
2805
2806 for (p = list; *p != NULL; p = &(*p)->next)
2807 if (*p == export)
2808 break;
2809
2810 if (*p != export)
2811 return -1;
2812
2813 rcu_assign_pointer(*p, (*p)->next);
2814
2815 return 0;
2816}
2817
2818static inline void
2819add_ftrace_export(struct trace_export **list, struct trace_export *export)
2820{
2821 if (*list == NULL)
2822 ftrace_exports_enable();
2823
2824 add_trace_export(list, export);
2825}
2826
2827static inline int
2828rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2829{
2830 int ret;
2831
2832 ret = rm_trace_export(list, export);
2833 if (*list == NULL)
2834 ftrace_exports_disable();
2835
2836 return ret;
2837}
2838
2839int register_ftrace_export(struct trace_export *export)
2840{
2841 if (WARN_ON_ONCE(!export->write))
2842 return -1;
2843
2844 mutex_lock(&ftrace_export_lock);
2845
2846 add_ftrace_export(&ftrace_exports_list, export);
2847
2848 mutex_unlock(&ftrace_export_lock);
2849
2850 return 0;
2851}
2852EXPORT_SYMBOL_GPL(register_ftrace_export);
2853
2854int unregister_ftrace_export(struct trace_export *export)
2855{
2856 int ret;
2857
2858 mutex_lock(&ftrace_export_lock);
2859
2860 ret = rm_ftrace_export(&ftrace_exports_list, export);
2861
2862 mutex_unlock(&ftrace_export_lock);
2863
2864 return ret;
2865}
2866EXPORT_SYMBOL_GPL(unregister_ftrace_export);
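
/*
 * Example (a minimal sketch): a module can mirror function trace events
 * to another transport by registering a trace_export. The names
 * "my_export" and "my_write" below are hypothetical.
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		// forward the raw @size-byte entry to another sink
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */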
2867
2868void
2869trace_function(struct trace_array *tr,
2870 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2871 int pc)
2872{
2873 struct trace_event_call *call = &event_function;
2874 struct trace_buffer *buffer = tr->array_buffer.buffer;
2875 struct ring_buffer_event *event;
2876 struct ftrace_entry *entry;
2877
2878 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2879 flags, pc);
2880 if (!event)
2881 return;
2882 entry = ring_buffer_event_data(event);
2883 entry->ip = ip;
2884 entry->parent_ip = parent_ip;
2885
2886 if (!call_filter_check_discard(call, entry, buffer, event)) {
2887 if (static_branch_unlikely(&ftrace_exports_enabled))
2888 ftrace_exports(event);
2889 __buffer_unlock_commit(buffer, event);
2890 }
2891}
2892
2893#ifdef CONFIG_STACKTRACE
2894
/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING	4
2897
2898#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2899
2900struct ftrace_stack {
2901 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2902};
2903
2904
2905struct ftrace_stacks {
2906 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2907};
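
/*
 * A worked size check (assuming 4K pages and 8-byte longs):
 * FTRACE_KSTACK_ENTRIES = 4096 / 4 = 1024 slots per context, so each
 * per-CPU struct ftrace_stacks holds 4 * 1024 * 8 = 32K of stack
 * storage, one struct ftrace_stack per nesting context (normal,
 * softirq, irq, NMI) tracked by ftrace_stack_reserve below.
 */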
2908
2909static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2910static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2911
2912static void __ftrace_trace_stack(struct trace_buffer *buffer,
2913 unsigned long flags,
2914 int skip, int pc, struct pt_regs *regs)
2915{
2916 struct trace_event_call *call = &event_kernel_stack;
2917 struct ring_buffer_event *event;
2918 unsigned int size, nr_entries;
2919 struct ftrace_stack *fstack;
2920 struct stack_entry *entry;
2921 int stackidx;
2922
	/*
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
2927#ifndef CONFIG_UNWINDER_ORC
2928 if (!regs)
2929 skip++;
2930#endif
2931
2932 preempt_disable_notrace();
2933
2934 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2935
	/* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2938 goto out;
2939
	/*
	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
	 * interrupt will either see the value pre increment or post
	 * increment. If the interrupt happens pre increment it will have
	 * restored the counter when it returns.  We just need a barrier to
	 * keep gcc from moving things around.
	 */
	barrier();
2948
2949 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2950 size = ARRAY_SIZE(fstack->calls);
2951
2952 if (regs) {
2953 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2954 size, skip);
2955 } else {
2956 nr_entries = stack_trace_save(fstack->calls, size, skip);
2957 }
2958
2959 size = nr_entries * sizeof(unsigned long);
2960 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2961 sizeof(*entry) + size, flags, pc);
2962 if (!event)
2963 goto out;
2964 entry = ring_buffer_event_data(event);
2965
2966 memcpy(&entry->caller, fstack->calls, size);
2967 entry->size = nr_entries;
2968
2969 if (!call_filter_check_discard(call, entry, buffer, event))
2970 __buffer_unlock_commit(buffer, event);
2971
2972 out:
	/* Again, don't let gcc optimize things here */
	barrier();
2975 __this_cpu_dec(ftrace_stack_reserve);
2976 preempt_enable_notrace();
2977
2978}
2979
2980static inline void ftrace_trace_stack(struct trace_array *tr,
2981 struct trace_buffer *buffer,
2982 unsigned long flags,
2983 int skip, int pc, struct pt_regs *regs)
2984{
2985 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2986 return;
2987
2988 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2989}
2990
2991void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2992 int pc)
2993{
2994 struct trace_buffer *buffer = tr->array_buffer.buffer;
2995
2996 if (rcu_is_watching()) {
2997 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2998 return;
2999 }
3000
	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
3007 if (unlikely(in_nmi()))
3008 return;
3009
3010 rcu_irq_enter_irqson();
3011 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3012 rcu_irq_exit_irqson();
3013}
3014
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
3019void trace_dump_stack(int skip)
3020{
3021 unsigned long flags;
3022
3023 if (tracing_disabled || tracing_selftest_running)
3024 return;
3025
3026 local_save_flags(flags);
3027
3028#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
3031#endif
3032 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3033 flags, skip, preempt_count(), NULL);
3034}
3035EXPORT_SYMBOL_GPL(trace_dump_stack);
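
/*
 * Example (sketch): a subsystem chasing an unexpected code path can
 * record where it was called from without spamming the console:
 *
 *	trace_dump_stack(0);
 *
 * The backtrace lands in the global ring buffer (the tracefs "trace"
 * file); pass a non-zero @skip to drop that many helper frames.
 */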
3036
3037#ifdef CONFIG_USER_STACKTRACE_SUPPORT
3038static DEFINE_PER_CPU(int, user_stack_count);
3039
3040static void
3041ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
3042{
3043 struct trace_event_call *call = &event_user_stack;
3044 struct ring_buffer_event *event;
3045 struct userstack_entry *entry;
3046
3047 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
3048 return;
3049
	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
3054 if (unlikely(in_nmi()))
3055 return;
3056
	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
3062 if (__this_cpu_read(user_stack_count))
3063 goto out;
3064
3065 __this_cpu_inc(user_stack_count);
3066
3067 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3068 sizeof(*entry), flags, pc);
3069 if (!event)
3070 goto out_drop_count;
3071 entry = ring_buffer_event_data(event);
3072
3073 entry->tgid = current->tgid;
3074 memset(&entry->caller, 0, sizeof(entry->caller));
3075
3076 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3077 if (!call_filter_check_discard(call, entry, buffer, event))
3078 __buffer_unlock_commit(buffer, event);
3079
3080 out_drop_count:
3081 __this_cpu_dec(user_stack_count);
3082 out:
3083 preempt_enable();
3084}
3085#else
3086static void ftrace_trace_userstack(struct trace_buffer *buffer,
3087 unsigned long flags, int pc)
3088{
3089}
3090#endif
3091
3092#endif
3093
/* created for use with alloc_percpu */
3095struct trace_buffer_struct {
3096 int nesting;
3097 char buffer[4][TRACE_BUF_SIZE];
3098};
3099
3100static struct trace_buffer_struct *trace_percpu_buffer;
3101
/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
3106static char *get_trace_buf(void)
3107{
3108 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3109
3110 if (!buffer || buffer->nesting >= 4)
3111 return NULL;
3112
3113 buffer->nesting++;
3114
	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
3118}
3119
3120static void put_trace_buf(void)
3121{
	/* Don't let the decrement of nesting leak before this */
	barrier();
3124 this_cpu_dec(trace_percpu_buffer->nesting);
3125}
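
/*
 * Usage pattern for the two helpers above (a sketch; the callers in
 * this file follow it): preemption must be disabled so the per-cpu
 * buffer and nesting count stay on one CPU, and every successful
 * get_trace_buf() must be paired with a put_trace_buf():
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		// format at most TRACE_BUF_SIZE bytes into tbuffer
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */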
3126
3127static int alloc_percpu_trace_buffer(void)
3128{
3129 struct trace_buffer_struct *buffers;
3130
3131 if (trace_percpu_buffer)
3132 return 0;
3133
3134 buffers = alloc_percpu(struct trace_buffer_struct);
3135 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3136 return -ENOMEM;
3137
3138 trace_percpu_buffer = buffers;
3139 return 0;
3140}
3141
3142static int buffers_allocated;
3143
3144void trace_printk_init_buffers(void)
3145{
3146 if (buffers_allocated)
3147 return;
3148
3149 if (alloc_percpu_trace_buffer())
3150 return;
3151
	/* trace_printk() is for debug use only. Don't use it in production. */

3154 pr_warn("\n");
3155 pr_warn("**********************************************************\n");
3156 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3157 pr_warn("** **\n");
3158 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3159 pr_warn("** **\n");
3160 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3161 pr_warn("** unsafe for production use. **\n");
3162 pr_warn("** **\n");
3163 pr_warn("** If you see this message and you are not debugging **\n");
3164 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3165 pr_warn("** **\n");
3166 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3167 pr_warn("**********************************************************\n");
3168
	/* Expand the buffers to set size */
	tracing_update_buffers();
3171
3172 buffers_allocated = 1;
3173
	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.array_buffer.buffer)
3181 tracing_start_cmdline_record();
3182}
3183EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3184
3185void trace_printk_start_comm(void)
3186{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
3189 return;
3190 tracing_start_cmdline_record();
3191}
3192
3193static void trace_printk_start_stop_comm(int enabled)
3194{
3195 if (!buffers_allocated)
3196 return;
3197
3198 if (enabled)
3199 tracing_start_cmdline_record();
3200 else
3201 tracing_stop_cmdline_record();
3202}
3203
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3211{
3212 struct trace_event_call *call = &event_bprint;
3213 struct ring_buffer_event *event;
3214 struct trace_buffer *buffer;
3215 struct trace_array *tr = &global_trace;
3216 struct bprint_entry *entry;
3217 unsigned long flags;
3218 char *tbuffer;
3219 int len = 0, size, pc;
3220
3221 if (unlikely(tracing_selftest_running || tracing_disabled))
3222 return 0;
3223
	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();
3226
3227 pc = preempt_count();
3228 preempt_disable_notrace();
3229
3230 tbuffer = get_trace_buf();
3231 if (!tbuffer) {
3232 len = 0;
3233 goto out_nobuffer;
3234 }
3235
3236 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3237
3238 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3239 goto out_put;
3240
3241 local_save_flags(flags);
3242 size = sizeof(*entry) + sizeof(u32) * len;
3243 buffer = tr->array_buffer.buffer;
3244 ring_buffer_nest_start(buffer);
3245 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3246 flags, pc);
3247 if (!event)
3248 goto out;
3249 entry = ring_buffer_event_data(event);
3250 entry->ip = ip;
3251 entry->fmt = fmt;
3252
3253 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3254 if (!call_filter_check_discard(call, entry, buffer, event)) {
3255 __buffer_unlock_commit(buffer, event);
3256 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3257 }
3258
3259out:
3260 ring_buffer_nest_end(buffer);
3261out_put:
3262 put_trace_buf();
3263
3264out_nobuffer:
3265 preempt_enable_notrace();
3266 unpause_graph_tracing();
3267
3268 return len;
3269}
3270EXPORT_SYMBOL_GPL(trace_vbprintk);
3271
3272__printf(3, 0)
3273static int
3274__trace_array_vprintk(struct trace_buffer *buffer,
3275 unsigned long ip, const char *fmt, va_list args)
3276{
3277 struct trace_event_call *call = &event_print;
3278 struct ring_buffer_event *event;
3279 int len = 0, size, pc;
3280 struct print_entry *entry;
3281 unsigned long flags;
3282 char *tbuffer;
3283
3284 if (tracing_disabled || tracing_selftest_running)
3285 return 0;
3286
	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();
3289
3290 pc = preempt_count();
3291 preempt_disable_notrace();
3292
3293
3294 tbuffer = get_trace_buf();
3295 if (!tbuffer) {
3296 len = 0;
3297 goto out_nobuffer;
3298 }
3299
3300 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3301
3302 local_save_flags(flags);
3303 size = sizeof(*entry) + len + 1;
3304 ring_buffer_nest_start(buffer);
3305 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3306 flags, pc);
3307 if (!event)
3308 goto out;
3309 entry = ring_buffer_event_data(event);
3310 entry->ip = ip;
3311
3312 memcpy(&entry->buf, tbuffer, len + 1);
3313 if (!call_filter_check_discard(call, entry, buffer, event)) {
3314 __buffer_unlock_commit(buffer, event);
3315 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3316 }
3317
3318out:
3319 ring_buffer_nest_end(buffer);
3320 put_trace_buf();
3321
3322out_nobuffer:
3323 preempt_enable_notrace();
3324 unpause_graph_tracing();
3325
3326 return len;
3327}
3328
3329__printf(3, 0)
3330int trace_array_vprintk(struct trace_array *tr,
3331 unsigned long ip, const char *fmt, va_list args)
3332{
3333 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3334}
3335
/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, they have the right to
 * printk strings into their tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as writing into the top level
 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should not
 * be ever incorporated into normal use.
 *
 * trace_array_printk() can be used, as it will not add noise to the
 * top level tracing buffer.
 *
 * Note, trace_array_printk() can only be used if trace_array_get()
 * was called.
 */
3356__printf(3, 0)
3357int trace_array_printk(struct trace_array *tr,
3358 unsigned long ip, const char *fmt, ...)
3359{
3360 int ret;
3361 va_list ap;
3362
3363 if (!tr)
3364 return -ENOENT;
3365
	/* This is only allowed for created instances */
	if (tr == &global_trace)
3368 return 0;
3369
3370 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3371 return 0;
3372
3373 va_start(ap, fmt);
3374 ret = trace_array_vprintk(tr, ip, fmt, ap);
3375 va_end(ap);
3376 return ret;
3377}
3378EXPORT_SYMBOL_GPL(trace_array_printk);
3379
/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, they are OK to
 * have in the kernel (unlike trace_printk()). This needs to be called
 * before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
3389{
3390 if (!tr)
3391 return -ENOENT;
3392
	/* This is only allowed for created instances */
	if (tr == &global_trace)
3395 return -EINVAL;
3396
3397 return alloc_percpu_trace_buffer();
3398}
3399EXPORT_SYMBOL_GPL(trace_array_init_printk);
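
/*
 * Example (a sketch; the instance name "my_inst" is hypothetical):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_inst");
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 *
 * This writes only into the "my_inst" instance buffer and leaves the
 * top level buffer untouched.
 */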
3400
3401__printf(3, 4)
3402int trace_array_printk_buf(struct trace_buffer *buffer,
3403 unsigned long ip, const char *fmt, ...)
3404{
3405 int ret;
3406 va_list ap;
3407
3408 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3409 return 0;
3410
3411 va_start(ap, fmt);
3412 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3413 va_end(ap);
3414 return ret;
3415}
3416
3417__printf(2, 0)
3418int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3419{
3420 return trace_array_vprintk(&global_trace, ip, fmt, args);
3421}
3422EXPORT_SYMBOL_GPL(trace_vprintk);
3423
3424static void trace_iterator_increment(struct trace_iterator *iter)
3425{
3426 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3427
3428 iter->idx++;
3429 if (buf_iter)
3430 ring_buffer_iter_advance(buf_iter);
3431}
3432
3433static struct trace_entry *
3434peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3435 unsigned long *lost_events)
3436{
3437 struct ring_buffer_event *event;
3438 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3439
3440 if (buf_iter) {
3441 event = ring_buffer_iter_peek(buf_iter, ts);
3442 if (lost_events)
3443 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3444 (unsigned long)-1 : 0;
3445 } else {
3446 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3447 lost_events);
3448 }
3449
3450 if (event) {
3451 iter->ent_size = ring_buffer_event_length(event);
3452 return ring_buffer_event_data(event);
3453 }
3454 iter->ent_size = 0;
3455 return NULL;
3456}
3457
3458static struct trace_entry *
3459__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3460 unsigned long *missing_events, u64 *ent_ts)
3461{
3462 struct trace_buffer *buffer = iter->array_buffer->buffer;
3463 struct trace_entry *ent, *next = NULL;
3464 unsigned long lost_events = 0, next_lost = 0;
3465 int cpu_file = iter->cpu_file;
3466 u64 next_ts = 0, ts;
3467 int next_cpu = -1;
3468 int next_size = 0;
3469 int cpu;
3470
	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpus and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3476 if (ring_buffer_empty_cpu(buffer, cpu_file))
3477 return NULL;
3478 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3479 if (ent_cpu)
3480 *ent_cpu = cpu_file;
3481
3482 return ent;
3483 }
3484
3485 for_each_tracing_cpu(cpu) {
3486
3487 if (ring_buffer_empty_cpu(buffer, cpu))
3488 continue;
3489
3490 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3491
		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
3496 next = ent;
3497 next_cpu = cpu;
3498 next_ts = ts;
3499 next_lost = lost_events;
3500 next_size = iter->ent_size;
3501 }
3502 }
3503
3504 iter->ent_size = next_size;
3505
3506 if (ent_cpu)
3507 *ent_cpu = next_cpu;
3508
3509 if (ent_ts)
3510 *ent_ts = next_ts;
3511
3512 if (missing_events)
3513 *missing_events = next_lost;
3514
3515 return next;
3516}
3517
3518#define STATIC_TEMP_BUF_SIZE 128
3519static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3520
/* Find the next real entry, without updating the iterator itself */
3522struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3523 int *ent_cpu, u64 *ent_ts)
3524{
	/* __find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
3527 struct trace_entry *entry;
3528
	/*
	 * If called from ftrace_dump(), then the iter->temp buffer
	 * will be the static_temp_buf and not created from kmalloc.
	 * If the entry size is greater than the buffer, we can
	 * not save it. Just return NULL in that case. This is only
	 * used to add markers when two consecutive events' time
	 * stamps have a large delta. See trace_print_lat_context().
	 */
	if (iter->temp == static_temp_buf &&
	    STATIC_TEMP_BUF_SIZE < ent_size)
3539 return NULL;
3540
	/*
	 * The __find_next_entry() may call peek_next_entry(), which may
	 * call ring_buffer_peek() that may make the contents of iter->ent
	 * undefined. Need to copy iter->ent now.
	 */
	if (iter->ent && iter->ent != iter->temp) {
3547 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3548 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3549 void *temp;
3550 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3551 if (!temp)
3552 return NULL;
3553 kfree(iter->temp);
3554 iter->temp = temp;
3555 iter->temp_size = iter->ent_size;
3556 }
3557 memcpy(iter->temp, iter->ent, iter->ent_size);
3558 iter->ent = iter->temp;
3559 }
3560 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
	/* Put back the original ent_size */
	iter->ent_size = ent_size;
3563
3564 return entry;
3565}
3566
/* Find the next real entry, and increment the iterator to the next entry */
3568void *trace_find_next_entry_inc(struct trace_iterator *iter)
3569{
3570 iter->ent = __find_next_entry(iter, &iter->cpu,
3571 &iter->lost_events, &iter->ts);
3572
3573 if (iter->ent)
3574 trace_iterator_increment(iter);
3575
3576 return iter->ent ? iter : NULL;
3577}
3578
3579static void trace_consume(struct trace_iterator *iter)
3580{
3581 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3582 &iter->lost_events);
3583}
3584
3585static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3586{
3587 struct trace_iterator *iter = m->private;
3588 int i = (int)*pos;
3589 void *ent;
3590
3591 WARN_ON_ONCE(iter->leftover);
3592
3593 (*pos)++;
3594
	/* can't go backwards */
	if (iter->idx > i)
3597 return NULL;
3598
3599 if (iter->idx < 0)
3600 ent = trace_find_next_entry_inc(iter);
3601 else
3602 ent = iter;
3603
3604 while (ent && iter->idx < i)
3605 ent = trace_find_next_entry_inc(iter);
3606
3607 iter->pos = *pos;
3608
3609 return ent;
3610}
3611
3612void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3613{
3614 struct ring_buffer_iter *buf_iter;
3615 unsigned long entries = 0;
3616 u64 ts;
3617
3618 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3619
3620 buf_iter = trace_buffer_iter(iter, cpu);
3621 if (!buf_iter)
3622 return;
3623
3624 ring_buffer_iter_reset(buf_iter);
3625
	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3632 if (ts >= iter->array_buffer->time_start)
3633 break;
3634 entries++;
3635 ring_buffer_iter_advance(buf_iter);
3636 }
3637
3638 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3639}
3640
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
3645static void *s_start(struct seq_file *m, loff_t *pos)
3646{
3647 struct trace_iterator *iter = m->private;
3648 struct trace_array *tr = iter->tr;
3649 int cpu_file = iter->cpu_file;
3650 void *p = NULL;
3651 loff_t l = 0;
3652 int cpu;
3653
	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
3661 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3662 *iter->trace = *tr->current_trace;
3663 mutex_unlock(&trace_types_lock);
3664
3665#ifdef CONFIG_TRACER_MAX_TRACE
3666 if (iter->snapshot && iter->trace->use_max_tr)
3667 return ERR_PTR(-EBUSY);
3668#endif
3669
3670 if (!iter->snapshot)
3671 atomic_inc(&trace_record_taskinfo_disabled);
3672
3673 if (*pos != iter->pos) {
3674 iter->ent = NULL;
3675 iter->cpu = 0;
3676 iter->idx = -1;
3677
3678 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3679 for_each_tracing_cpu(cpu)
3680 tracing_iter_reset(iter, cpu);
3681 } else
3682 tracing_iter_reset(iter, cpu_file);
3683
3684 iter->leftover = 0;
3685 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3686 ;
3687
3688 } else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
3694 p = iter;
3695 else {
3696 l = *pos - 1;
3697 p = s_next(m, p, &l);
3698 }
3699 }
3700
3701 trace_event_read_lock();
3702 trace_access_lock(cpu_file);
3703 return p;
3704}
3705
3706static void s_stop(struct seq_file *m, void *p)
3707{
3708 struct trace_iterator *iter = m->private;
3709
3710#ifdef CONFIG_TRACER_MAX_TRACE
3711 if (iter->snapshot && iter->trace->use_max_tr)
3712 return;
3713#endif
3714
3715 if (!iter->snapshot)
3716 atomic_dec(&trace_record_taskinfo_disabled);
3717
3718 trace_access_unlock(iter->cpu_file);
3719 trace_event_read_unlock();
3720}
3721
3722static void
3723get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3724 unsigned long *entries, int cpu)
3725{
3726 unsigned long count;
3727
3728 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3729
	/*
	 * If this buffer has skipped entries, then we hold all
	 * entries for the trace and we need to ignore the
	 * ones before the time stamp.
	 */
	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
		/* total is the same as the entries */
		*total = count;
	} else
3739 *total = count +
3740 ring_buffer_overrun_cpu(buf->buffer, cpu);
3741 *entries = count;
3742}
3743
3744static void
3745get_total_entries(struct array_buffer *buf,
3746 unsigned long *total, unsigned long *entries)
3747{
3748 unsigned long t, e;
3749 int cpu;
3750
3751 *total = 0;
3752 *entries = 0;
3753
3754 for_each_tracing_cpu(cpu) {
3755 get_total_entries_cpu(buf, &t, &e, cpu);
3756 *total += t;
3757 *entries += e;
3758 }
3759}
3760
3761unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3762{
3763 unsigned long total, entries;
3764
3765 if (!tr)
3766 tr = &global_trace;
3767
3768 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3769
3770 return entries;
3771}
3772
3773unsigned long trace_total_entries(struct trace_array *tr)
3774{
3775 unsigned long total, entries;
3776
3777 if (!tr)
3778 tr = &global_trace;
3779
3780 get_total_entries(&tr->array_buffer, &total, &entries);
3781
3782 return entries;
3783}
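
/*
 * Example (sketch): trace_total_entries(NULL) reports how many events
 * the global buffer currently holds, e.g. to decide whether dumping
 * is worthwhile at all:
 *
 *	if (trace_total_entries(NULL))
 *		ftrace_dump(DUMP_ALL);
 */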
3784
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}
3796
3797static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3798{
3799 unsigned long total;
3800 unsigned long entries;
3801
3802 get_total_entries(buf, &total, &entries);
3803 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3804 entries, total, num_online_cpus());
3805 seq_puts(m, "#\n");
3806}
3807
static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
	seq_printf(m, "#              | |     %s    |       |         |\n", tgid ? "  |      " : "");
}
3818
static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	/* "space" must be at least as wide as the largest "prec" below */
	const char *space = "            ";
	int prec = tgid ? 12 : 2;

	print_event_info(buf, m);

	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
	seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
	seq_printf(m, "#           TASK-PID %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
	seq_printf(m, "#              | |   %.*s   |   ||||      |         |\n", prec, "       |    ");
}
3836
3837void
3838print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3839{
3840 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3841 struct array_buffer *buf = iter->array_buffer;
3842 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3843 struct tracer *type = iter->trace;
3844 unsigned long entries;
3845 unsigned long total;
	const char *name = type->name;
3849
3850 get_total_entries(buf, &total, &entries);
3851
3852 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3853 name, UTS_RELEASE);
3854 seq_puts(m, "# -----------------------------------"
3855 "---------------------------------\n");
3856 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3857 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3858 nsecs_to_usecs(data->saved_latency),
3859 entries,
3860 total,
3861 buf->cpu,
3862#if defined(CONFIG_PREEMPT_NONE)
3863 "server",
3864#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3865 "desktop",
3866#elif defined(CONFIG_PREEMPT)
3867 "preempt",
3868#elif defined(CONFIG_PREEMPT_RT)
3869 "preempt_rt",
3870#else
3871 "unknown",
3872#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
3875#ifdef CONFIG_SMP
3876 seq_printf(m, " #P:%d)\n", num_online_cpus());
3877#else
3878 seq_puts(m, ")\n");
3879#endif
3880 seq_puts(m, "# -----------------\n");
3881 seq_printf(m, "# | task: %.16s-%d "
3882 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3883 data->comm, data->pid,
3884 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3885 data->policy, data->rt_priority);
3886 seq_puts(m, "# -----------------\n");
3887
3888 if (data->critical_start) {
3889 seq_puts(m, "# => started at: ");
3890 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3891 trace_print_seq(m, &iter->seq);
3892 seq_puts(m, "\n# => ended at: ");
3893 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3894 trace_print_seq(m, &iter->seq);
3895 seq_puts(m, "\n#\n");
3896 }
3897
3898 seq_puts(m, "#\n");
3899}
3900
3901static void test_cpu_buff_start(struct trace_iterator *iter)
3902{
3903 struct trace_seq *s = &iter->seq;
3904 struct trace_array *tr = iter->tr;
3905
3906 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3907 return;
3908
3909 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3910 return;
3911
3912 if (cpumask_available(iter->started) &&
3913 cpumask_test_cpu(iter->cpu, iter->started))
3914 return;
3915
3916 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3917 return;
3918
3919 if (cpumask_available(iter->started))
3920 cpumask_set_cpu(iter->cpu, iter->started);
3921
	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
3924 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3925 iter->cpu);
3926}
3927
3928static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3929{
3930 struct trace_array *tr = iter->tr;
3931 struct trace_seq *s = &iter->seq;
3932 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3933 struct trace_entry *entry;
3934 struct trace_event *event;
3935
3936 entry = iter->ent;
3937
3938 test_cpu_buff_start(iter);
3939
3940 event = ftrace_find_event(entry->type);
3941
3942 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3943 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3944 trace_print_lat_context(iter);
3945 else
3946 trace_print_context(iter);
3947 }
3948
3949 if (trace_seq_has_overflowed(s))
3950 return TRACE_TYPE_PARTIAL_LINE;
3951
3952 if (event)
3953 return event->funcs->trace(iter, sym_flags, event);
3954
3955 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3956
3957 return trace_handle_return(s);
3958}
3959
3960static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3961{
3962 struct trace_array *tr = iter->tr;
3963 struct trace_seq *s = &iter->seq;
3964 struct trace_entry *entry;
3965 struct trace_event *event;
3966
3967 entry = iter->ent;
3968
3969 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3970 trace_seq_printf(s, "%d %d %llu ",
3971 entry->pid, iter->cpu, iter->ts);
3972
3973 if (trace_seq_has_overflowed(s))
3974 return TRACE_TYPE_PARTIAL_LINE;
3975
3976 event = ftrace_find_event(entry->type);
3977 if (event)
3978 return event->funcs->raw(iter, 0, event);
3979
3980 trace_seq_printf(s, "%d ?\n", entry->type);
3981
3982 return trace_handle_return(s);
3983}
3984
3985static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3986{
3987 struct trace_array *tr = iter->tr;
3988 struct trace_seq *s = &iter->seq;
3989 unsigned char newline = '\n';
3990 struct trace_entry *entry;
3991 struct trace_event *event;
3992
3993 entry = iter->ent;
3994
3995 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3996 SEQ_PUT_HEX_FIELD(s, entry->pid);
3997 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3998 SEQ_PUT_HEX_FIELD(s, iter->ts);
3999 if (trace_seq_has_overflowed(s))
4000 return TRACE_TYPE_PARTIAL_LINE;
4001 }
4002
4003 event = ftrace_find_event(entry->type);
4004 if (event) {
4005 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4006 if (ret != TRACE_TYPE_HANDLED)
4007 return ret;
4008 }
4009
4010 SEQ_PUT_FIELD(s, newline);
4011
4012 return trace_handle_return(s);
4013}
4014
4015static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4016{
4017 struct trace_array *tr = iter->tr;
4018 struct trace_seq *s = &iter->seq;
4019 struct trace_entry *entry;
4020 struct trace_event *event;
4021
4022 entry = iter->ent;
4023
4024 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4025 SEQ_PUT_FIELD(s, entry->pid);
4026 SEQ_PUT_FIELD(s, iter->cpu);
4027 SEQ_PUT_FIELD(s, iter->ts);
4028 if (trace_seq_has_overflowed(s))
4029 return TRACE_TYPE_PARTIAL_LINE;
4030 }
4031
4032 event = ftrace_find_event(entry->type);
4033 return event ? event->funcs->binary(iter, 0, event) :
4034 TRACE_TYPE_HANDLED;
4035}
4036
4037int trace_empty(struct trace_iterator *iter)
4038{
4039 struct ring_buffer_iter *buf_iter;
4040 int cpu;
4041
	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4044 cpu = iter->cpu_file;
4045 buf_iter = trace_buffer_iter(iter, cpu);
4046 if (buf_iter) {
4047 if (!ring_buffer_iter_empty(buf_iter))
4048 return 0;
4049 } else {
4050 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4051 return 0;
4052 }
4053 return 1;
4054 }
4055
4056 for_each_tracing_cpu(cpu) {
4057 buf_iter = trace_buffer_iter(iter, cpu);
4058 if (buf_iter) {
4059 if (!ring_buffer_iter_empty(buf_iter))
4060 return 0;
4061 } else {
4062 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4063 return 0;
4064 }
4065 }
4066
4067 return 1;
4068}
4069
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
4072{
4073 struct trace_array *tr = iter->tr;
4074 unsigned long trace_flags = tr->trace_flags;
4075 enum print_line_t ret;
4076
4077 if (iter->lost_events) {
4078 if (iter->lost_events == (unsigned long)-1)
4079 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4080 iter->cpu);
4081 else
4082 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4083 iter->cpu, iter->lost_events);
4084 if (trace_seq_has_overflowed(&iter->seq))
4085 return TRACE_TYPE_PARTIAL_LINE;
4086 }
4087
4088 if (iter->trace && iter->trace->print_line) {
4089 ret = iter->trace->print_line(iter);
4090 if (ret != TRACE_TYPE_UNHANDLED)
4091 return ret;
4092 }
4093
4094 if (iter->ent->type == TRACE_BPUTS &&
4095 trace_flags & TRACE_ITER_PRINTK &&
4096 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4097 return trace_print_bputs_msg_only(iter);
4098
4099 if (iter->ent->type == TRACE_BPRINT &&
4100 trace_flags & TRACE_ITER_PRINTK &&
4101 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4102 return trace_print_bprintk_msg_only(iter);
4103
4104 if (iter->ent->type == TRACE_PRINT &&
4105 trace_flags & TRACE_ITER_PRINTK &&
4106 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4107 return trace_print_printk_msg_only(iter);
4108
4109 if (trace_flags & TRACE_ITER_BIN)
4110 return print_bin_fmt(iter);
4111
4112 if (trace_flags & TRACE_ITER_HEX)
4113 return print_hex_fmt(iter);
4114
4115 if (trace_flags & TRACE_ITER_RAW)
4116 return print_raw_fmt(iter);
4117
4118 return print_trace_fmt(iter);
4119}
4120
4121void trace_latency_header(struct seq_file *m)
4122{
4123 struct trace_iterator *iter = m->private;
4124 struct trace_array *tr = iter->tr;
4125
	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
4128 return;
4129
4130 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4131 print_trace_header(m, iter);
4132
4133 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4134 print_lat_help_header(m);
4135}
4136
4137void trace_default_header(struct seq_file *m)
4138{
4139 struct trace_iterator *iter = m->private;
4140 struct trace_array *tr = iter->tr;
4141 unsigned long trace_flags = tr->trace_flags;
4142
4143 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4144 return;
4145
4146 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
4149 return;
4150 print_trace_header(m, iter);
4151 if (!(trace_flags & TRACE_ITER_VERBOSE))
4152 print_lat_help_header(m);
4153 } else {
4154 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4155 if (trace_flags & TRACE_ITER_IRQ_INFO)
4156 print_func_help_header_irq(iter->array_buffer,
4157 m, trace_flags);
4158 else
4159 print_func_help_header(iter->array_buffer, m,
4160 trace_flags);
4161 }
4162 }
4163}
4164
4165static void test_ftrace_alive(struct seq_file *m)
4166{
4167 if (!ftrace_is_dead())
4168 return;
4169 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4170 "# MAY BE MISSING FUNCTION EVENTS\n");
4171}
4172
4173#ifdef CONFIG_TRACER_MAX_TRACE
4174static void show_snapshot_main_help(struct seq_file *m)
4175{
4176 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4177 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4178 "# Takes a snapshot of the main buffer.\n"
4179 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2'; works with any number that\n"
		    "# is not a '0' or '1')\n");
4182}
4183
4184static void show_snapshot_percpu_help(struct seq_file *m)
4185{
4186 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4187#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4188 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4189 "# Takes a snapshot of the main buffer for this cpu.\n");
4190#else
4191 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4192 "# Must use main snapshot file to allocate.\n");
4193#endif
4194 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2'; works with any number that\n"
		    "# is not a '0' or '1')\n");
4197}
4198
4199static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4200{
4201 if (iter->tr->allocated_snapshot)
4202 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4203 else
4204 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4205
4206 seq_puts(m, "# Snapshot commands:\n");
4207 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4208 show_snapshot_main_help(m);
4209 else
4210 show_snapshot_percpu_help(m);
4211}
4212#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4215#endif
4216
4217static int s_show(struct seq_file *m, void *v)
4218{
4219 struct trace_iterator *iter = v;
4220 int ret;
4221
4222 if (iter->ent == NULL) {
4223 if (iter->tr) {
4224 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4225 seq_puts(m, "#\n");
4226 test_ftrace_alive(m);
4227 }
4228 if (iter->snapshot && trace_empty(iter))
4229 print_snapshot_help(m, iter);
4230 else if (iter->trace && iter->trace->print_header)
4231 iter->trace->print_header(m);
4232 else
4233 trace_default_header(m);
4234
4235 } else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;
4244
4245 } else {
4246 print_trace_line(iter);
4247 ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
4256 }
4257
4258 return 0;
4259}
4260
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
4265static inline int tracing_get_cpu(struct inode *inode)
4266{
4267 if (inode->i_cdev)
4268 return (long)inode->i_cdev - 1;
4269 return RING_BUFFER_ALL_CPUS;
4270}
4271
4272static const struct seq_operations tracer_seq_ops = {
4273 .start = s_start,
4274 .next = s_next,
4275 .stop = s_stop,
4276 .show = s_show,
4277};
4278
4279static struct trace_iterator *
4280__tracing_open(struct inode *inode, struct file *file, bool snapshot)
4281{
4282 struct trace_array *tr = inode->i_private;
4283 struct trace_iterator *iter;
4284 int cpu;
4285
4286 if (tracing_disabled)
4287 return ERR_PTR(-ENODEV);
4288
4289 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4290 if (!iter)
4291 return ERR_PTR(-ENOMEM);
4292
4293 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4294 GFP_KERNEL);
4295 if (!iter->buffer_iter)
4296 goto release;
4297
	/*
	 * trace_find_next_entry() may need to save off iter->ent.
	 * It will place it into the iter->temp buffer. As most
	 * events are less than 128, allocate a buffer of that size.
	 * If one is greater, then trace_find_next_entry() will
	 * allocate a new buffer to adjust for the bigger iter->ent.
	 * It's not critical if it fails to get the buffer here.
	 */
	iter->temp = kmalloc(128, GFP_KERNEL);
4307 if (iter->temp)
4308 iter->temp_size = 128;
4309
	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
4315 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4316 if (!iter->trace)
4317 goto fail;
4318
4319 *iter->trace = *tr->current_trace;
4320
4321 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4322 goto fail;
4323
4324 iter->tr = tr;
4325
4326#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
4329 iter->array_buffer = &tr->max_buffer;
4330 else
4331#endif
4332 iter->array_buffer = &tr->array_buffer;
4333 iter->snapshot = snapshot;
4334 iter->pos = -1;
4335 iter->cpu_file = tracing_get_cpu(inode);
4336 mutex_init(&iter->mutex);
4337
	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace->open)
4340 iter->trace->open(iter);
4341
	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
4344 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4345
	/* Output in nanoseconds only if we are using a clock in nanoseconds */
	if (trace_clocks[tr->clock_id].in_ns)
4348 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4349
	/*
	 * If pause-on-trace is enabled, then stop the trace while
	 * dumping, unless this is the "snapshot" file.
	 */
	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4355 tracing_stop_tr(tr);
4356
4357 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4358 for_each_tracing_cpu(cpu) {
4359 iter->buffer_iter[cpu] =
4360 ring_buffer_read_prepare(iter->array_buffer->buffer,
4361 cpu, GFP_KERNEL);
4362 }
4363 ring_buffer_read_prepare_sync();
4364 for_each_tracing_cpu(cpu) {
4365 ring_buffer_read_start(iter->buffer_iter[cpu]);
4366 tracing_iter_reset(iter, cpu);
4367 }
4368 } else {
4369 cpu = iter->cpu_file;
4370 iter->buffer_iter[cpu] =
4371 ring_buffer_read_prepare(iter->array_buffer->buffer,
4372 cpu, GFP_KERNEL);
4373 ring_buffer_read_prepare_sync();
4374 ring_buffer_read_start(iter->buffer_iter[cpu]);
4375 tracing_iter_reset(iter, cpu);
4376 }
4377
4378 mutex_unlock(&trace_types_lock);
4379
4380 return iter;
4381
4382 fail:
4383 mutex_unlock(&trace_types_lock);
4384 kfree(iter->trace);
4385 kfree(iter->temp);
4386 kfree(iter->buffer_iter);
4387release:
4388 seq_release_private(inode, file);
4389 return ERR_PTR(-ENOMEM);
4390}
4391
4392int tracing_open_generic(struct inode *inode, struct file *filp)
4393{
4394 int ret;
4395
4396 ret = tracing_check_open_get_tr(NULL);
4397 if (ret)
4398 return ret;
4399
4400 filp->private_data = inode->i_private;
4401 return 0;
4402}
4403
4404bool tracing_is_disabled(void)
4405{
	return tracing_disabled ? true : false;
4407}
4408
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
4413int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4414{
4415 struct trace_array *tr = inode->i_private;
4416 int ret;
4417
4418 ret = tracing_check_open_get_tr(tr);
4419 if (ret)
4420 return ret;
4421
4422 filp->private_data = inode->i_private;
4423
4424 return 0;
4425}
4426
4427static int tracing_release(struct inode *inode, struct file *file)
4428{
4429 struct trace_array *tr = inode->i_private;
4430 struct seq_file *m = file->private_data;
4431 struct trace_iterator *iter;
4432 int cpu;
4433
4434 if (!(file->f_mode & FMODE_READ)) {
4435 trace_array_put(tr);
4436 return 0;
4437 }
4438
	/* Writes do not use seq_file */
	iter = m->private;
4441 mutex_lock(&trace_types_lock);
4442
4443 for_each_tracing_cpu(cpu) {
4444 if (iter->buffer_iter[cpu])
4445 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4446 }
4447
4448 if (iter->trace && iter->trace->close)
4449 iter->trace->close(iter);
4450
4451 if (!iter->snapshot && tr->stop_count)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);
4454
4455 __trace_array_put(tr);
4456
4457 mutex_unlock(&trace_types_lock);
4458
4459 mutex_destroy(&iter->mutex);
4460 free_cpumask_var(iter->started);
4461 kfree(iter->temp);
4462 kfree(iter->trace);
4463 kfree(iter->buffer_iter);
4464 seq_release_private(inode, file);
4465
4466 return 0;
4467}
4468
4469static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4470{
4471 struct trace_array *tr = inode->i_private;
4472
4473 trace_array_put(tr);
4474 return 0;
4475}
4476
4477static int tracing_single_release_tr(struct inode *inode, struct file *file)
4478{
4479 struct trace_array *tr = inode->i_private;
4480
4481 trace_array_put(tr);
4482
4483 return single_release(inode, file);
4484}
4485
4486static int tracing_open(struct inode *inode, struct file *file)
4487{
4488 struct trace_array *tr = inode->i_private;
4489 struct trace_iterator *iter;
4490 int ret;
4491
4492 ret = tracing_check_open_get_tr(tr);
4493 if (ret)
4494 return ret;
4495
	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4498 int cpu = tracing_get_cpu(inode);
4499 struct array_buffer *trace_buf = &tr->array_buffer;
4500
4501#ifdef CONFIG_TRACER_MAX_TRACE
4502 if (tr->current_trace->print_max)
4503 trace_buf = &tr->max_buffer;
4504#endif
4505
4506 if (cpu == RING_BUFFER_ALL_CPUS)
4507 tracing_reset_online_cpus(trace_buf);
4508 else
4509 tracing_reset_cpu(trace_buf, cpu);
4510 }
4511
4512 if (file->f_mode & FMODE_READ) {
4513 iter = __tracing_open(inode, file, false);
4514 if (IS_ERR(iter))
4515 ret = PTR_ERR(iter);
4516 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4517 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4518 }
4519
4520 if (ret < 0)
4521 trace_array_put(tr);
4522
4523 return ret;
4524}
4525
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
4531static bool
4532trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4533{
4534 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4535}
4536
/* Find the next tracer that this trace array may use */
static struct tracer *
4539get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4540{
4541 while (t && !trace_ok_for_array(t, tr))
4542 t = t->next;
4543
4544 return t;
4545}
4546
4547static void *
4548t_next(struct seq_file *m, void *v, loff_t *pos)
4549{
4550 struct trace_array *tr = m->private;
4551 struct tracer *t = v;
4552
4553 (*pos)++;
4554
4555 if (t)
4556 t = get_tracer_for_array(tr, t->next);
4557
4558 return t;
4559}
4560
4561static void *t_start(struct seq_file *m, loff_t *pos)
4562{
4563 struct trace_array *tr = m->private;
4564 struct tracer *t;
4565 loff_t l = 0;
4566
4567 mutex_lock(&trace_types_lock);
4568
4569 t = get_tracer_for_array(tr, trace_types);
4570 for (; t && l < *pos; t = t_next(m, t, &l))
4571 ;
4572
4573 return t;
4574}
4575
4576static void t_stop(struct seq_file *m, void *p)
4577{
4578 mutex_unlock(&trace_types_lock);
4579}
4580
4581static int t_show(struct seq_file *m, void *v)
4582{
4583 struct tracer *t = v;
4584
4585 if (!t)
4586 return 0;
4587
4588 seq_puts(m, t->name);
4589 if (t->next)
4590 seq_putc(m, ' ');
4591 else
4592 seq_putc(m, '\n');
4593
4594 return 0;
4595}
4596
4597static const struct seq_operations show_traces_seq_ops = {
4598 .start = t_start,
4599 .next = t_next,
4600 .stop = t_stop,
4601 .show = t_show,
4602};
4603
4604static int show_traces_open(struct inode *inode, struct file *file)
4605{
4606 struct trace_array *tr = inode->i_private;
4607 struct seq_file *m;
4608 int ret;
4609
4610 ret = tracing_check_open_get_tr(tr);
4611 if (ret)
4612 return ret;
4613
4614 ret = seq_open(file, &show_traces_seq_ops);
4615 if (ret) {
4616 trace_array_put(tr);
4617 return ret;
4618 }
4619
4620 m = file->private_data;
4621 m->private = tr;
4622
4623 return 0;
4624}
4625
4626static int show_traces_release(struct inode *inode, struct file *file)
4627{
4628 struct trace_array *tr = inode->i_private;
4629
4630 trace_array_put(tr);
4631 return seq_release(inode, file);
4632}
4633
4634static ssize_t
4635tracing_write_stub(struct file *filp, const char __user *ubuf,
4636 size_t count, loff_t *ppos)
4637{
4638 return count;
4639}
4640
4641loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4642{
4643 int ret;
4644
4645 if (file->f_mode & FMODE_READ)
4646 ret = seq_lseek(file, offset, whence);
4647 else
4648 file->f_pos = ret = 0;
4649
4650 return ret;
4651}
4652
4653static const struct file_operations tracing_fops = {
4654 .open = tracing_open,
4655 .read = seq_read,
4656 .write = tracing_write_stub,
4657 .llseek = tracing_lseek,
4658 .release = tracing_release,
4659};
4660
4661static const struct file_operations show_traces_fops = {
4662 .open = show_traces_open,
4663 .read = seq_read,
4664 .llseek = seq_lseek,
4665 .release = show_traces_release,
4666};
4667
4668static ssize_t
4669tracing_cpumask_read(struct file *filp, char __user *ubuf,
4670 size_t count, loff_t *ppos)
4671{
4672 struct trace_array *tr = file_inode(filp)->i_private;
4673 char *mask_str;
4674 int len;
4675
4676 len = snprintf(NULL, 0, "%*pb\n",
4677 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4678 mask_str = kmalloc(len, GFP_KERNEL);
4679 if (!mask_str)
4680 return -ENOMEM;
4681
4682 len = snprintf(mask_str, len, "%*pb\n",
4683 cpumask_pr_args(tr->tracing_cpumask));
4684 if (len >= count) {
4685 count = -EINVAL;
4686 goto out_err;
4687 }
4688 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4689
4690out_err:
4691 kfree(mask_str);
4692
4693 return count;
4694}
4695
4696int tracing_set_cpumask(struct trace_array *tr,
4697 cpumask_var_t tracing_cpumask_new)
4698{
4699 int cpu;
4700
4701 if (!tr)
4702 return -EINVAL;
4703
4704 local_irq_disable();
4705 arch_spin_lock(&tr->max_lock);
4706 for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4712 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4713 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4714 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4715 }
4716 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4717 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4718 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4719 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4720 }
4721 }
4722 arch_spin_unlock(&tr->max_lock);
4723 local_irq_enable();
4724
4725 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4726
4727 return 0;
4728}
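
/*
 * Example (a sketch): restrict an instance to CPUs 0 and 1 from kernel
 * code. tracing_set_cpumask() copies the mask, so the caller may free
 * its own copy afterwards:
 *
 *	cpumask_var_t new_mask;
 *
 *	if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
 *		cpumask_clear(new_mask);
 *		cpumask_set_cpu(0, new_mask);
 *		cpumask_set_cpu(1, new_mask);
 *		tracing_set_cpumask(tr, new_mask);
 *		free_cpumask_var(new_mask);
 *	}
 */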
4729
4730static ssize_t
4731tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4732 size_t count, loff_t *ppos)
4733{
4734 struct trace_array *tr = file_inode(filp)->i_private;
4735 cpumask_var_t tracing_cpumask_new;
4736 int err;
4737
4738 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4739 return -ENOMEM;
4740
4741 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4742 if (err)
4743 goto err_free;
4744
4745 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4746 if (err)
4747 goto err_free;
4748
4749 free_cpumask_var(tracing_cpumask_new);
4750
4751 return count;
4752
4753err_free:
4754 free_cpumask_var(tracing_cpumask_new);
4755
4756 return err;
4757}
4758
4759static const struct file_operations tracing_cpumask_fops = {
4760 .open = tracing_open_generic_tr,
4761 .read = tracing_cpumask_read,
4762 .write = tracing_cpumask_write,
4763 .release = tracing_release_generic_tr,
4764 .llseek = generic_file_llseek,
4765};
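
/*
 * Example (user side): the fops above back the tracing_cpumask file,
 * which takes a hex CPU mask, so
 *
 *	echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * limits tracing to CPUs 0-1, and reading the file prints the current
 * mask in the same format.
 */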
4766
4767static int tracing_trace_options_show(struct seq_file *m, void *v)
4768{
4769 struct tracer_opt *trace_opts;
4770 struct trace_array *tr = m->private;
4771 u32 tracer_flags;
4772 int i;
4773
4774 mutex_lock(&trace_types_lock);
4775 tracer_flags = tr->current_trace->flags->val;
4776 trace_opts = tr->current_trace->flags->opts;
4777
4778 for (i = 0; trace_options[i]; i++) {
4779 if (tr->trace_flags & (1 << i))
4780 seq_printf(m, "%s\n", trace_options[i]);
4781 else
4782 seq_printf(m, "no%s\n", trace_options[i]);
4783 }
4784
4785 for (i = 0; trace_opts[i].name; i++) {
4786 if (tracer_flags & trace_opts[i].bit)
4787 seq_printf(m, "%s\n", trace_opts[i].name);
4788 else
4789 seq_printf(m, "no%s\n", trace_opts[i].name);
4790 }
4791 mutex_unlock(&trace_types_lock);
4792
4793 return 0;
4794}
4795
4796static int __set_tracer_option(struct trace_array *tr,
4797 struct tracer_flags *tracer_flags,
4798 struct tracer_opt *opts, int neg)
4799{
4800 struct tracer *trace = tracer_flags->trace;
4801 int ret;
4802
4803 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4804 if (ret)
4805 return ret;
4806
4807 if (neg)
4808 tracer_flags->val &= ~opts->bit;
4809 else
4810 tracer_flags->val |= opts->bit;
4811 return 0;
4812}
4813
/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4816{
4817 struct tracer *trace = tr->current_trace;
4818 struct tracer_flags *tracer_flags = trace->flags;
4819 struct tracer_opt *opts = NULL;
4820 int i;
4821
4822 for (i = 0; tracer_flags->opts[i].name; i++) {
4823 opts = &tracer_flags->opts[i];
4824
4825 if (strcmp(cmp, opts->name) == 0)
4826 return __set_tracer_option(tr, trace->flags, opts, neg);
4827 }
4828
4829 return -EINVAL;
4830}
4831
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4834{
4835 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4836 return -1;
4837
4838 return 0;
4839}
4840
4841int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4842{
4843 if ((mask == TRACE_ITER_RECORD_TGID) ||
4844 (mask == TRACE_ITER_RECORD_CMD))
4845 lockdep_assert_held(&event_mutex);
4846
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
4849 return 0;
4850
	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
4853 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4854 return -EINVAL;
4855
4856 if (enabled)
4857 tr->trace_flags |= mask;
4858 else
4859 tr->trace_flags &= ~mask;
4860
4861 if (mask == TRACE_ITER_RECORD_CMD)
4862 trace_event_enable_cmd_record(enabled);
4863
4864 if (mask == TRACE_ITER_RECORD_TGID) {
4865 if (!tgid_map)
4866 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4867 sizeof(*tgid_map),
4868 GFP_KERNEL);
4869 if (!tgid_map) {
4870 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4871 return -ENOMEM;
4872 }
4873
4874 trace_event_enable_tgid_record(enabled);
4875 }
4876
4877 if (mask == TRACE_ITER_EVENT_FORK)
4878 trace_event_follow_fork(tr, enabled);
4879
4880 if (mask == TRACE_ITER_FUNC_FORK)
4881 ftrace_pid_follow_fork(tr, enabled);
4882
4883 if (mask == TRACE_ITER_OVERWRITE) {
4884 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4885#ifdef CONFIG_TRACER_MAX_TRACE
4886 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4887#endif
4888 }
4889
4890 if (mask == TRACE_ITER_PRINTK) {
4891 trace_printk_start_stop_comm(enabled);
4892 trace_printk_control(enabled);
4893 }
4894
4895 return 0;
4896}
4897
4898int trace_set_options(struct trace_array *tr, char *option)
4899{
4900 char *cmp;
4901 int neg = 0;
4902 int ret;
4903 size_t orig_len = strlen(option);
4904 int len;
4905
4906 cmp = strstrip(option);
4907
4908 len = str_has_prefix(cmp, "no");
4909 if (len)
4910 neg = 1;
4911
4912 cmp += len;
4913
4914 mutex_lock(&event_mutex);
4915 mutex_lock(&trace_types_lock);
4916
4917 ret = match_string(trace_options, -1, cmp);
4918
4919 if (ret < 0)
4920 ret = set_tracer_option(tr, cmp, neg);
4921 else
4922 ret = set_tracer_flag(tr, 1 << ret, !neg);
4923
4924 mutex_unlock(&trace_types_lock);
4925 mutex_unlock(&event_mutex);
4926
	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
4932 option[strlen(option)] = ' ';
4933
4934 return ret;
4935}
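
/*
 * Example (sketch): trace_set_options() takes a single option token,
 * with a "no" prefix to clear it, mirroring writes to the
 * trace_options file:
 *
 *	trace_set_options(&global_trace, "stacktrace");
 *	trace_set_options(&global_trace, "nostacktrace");
 */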
4936
4937static void __init apply_trace_boot_options(void)
4938{
4939 char *buf = trace_boot_options_buf;
4940 char *option;
4941
4942 while (true) {
4943 option = strsep(&buf, ",");
4944
4945 if (!option)
4946 break;
4947
4948 if (*option)
4949 trace_set_options(&global_trace, option);
4950
		/* Put back the comma to allow this to be called again */
		if (buf)
4953 *(buf - 1) = ',';
4954 }
4955}
4956
4957static ssize_t
4958tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4959 size_t cnt, loff_t *ppos)
4960{
4961 struct seq_file *m = filp->private_data;
4962 struct trace_array *tr = m->private;
4963 char buf[64];
4964 int ret;
4965
4966 if (cnt >= sizeof(buf))
4967 return -EINVAL;
4968
4969 if (copy_from_user(buf, ubuf, cnt))
4970 return -EFAULT;
4971
4972 buf[cnt] = 0;
4973
4974 ret = trace_set_options(tr, buf);
4975 if (ret < 0)
4976 return ret;
4977
4978 *ppos += cnt;
4979
4980 return cnt;
4981}
4982
4983static int tracing_trace_options_open(struct inode *inode, struct file *file)
4984{
4985 struct trace_array *tr = inode->i_private;
4986 int ret;
4987
4988 ret = tracing_check_open_get_tr(tr);
4989 if (ret)
4990 return ret;
4991
4992 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4993 if (ret < 0)
4994 trace_array_put(tr);
4995
4996 return ret;
4997}
4998
4999static const struct file_operations tracing_iter_fops = {
5000 .open = tracing_trace_options_open,
5001 .read = seq_read,
5002 .llseek = seq_lseek,
5003 .release = tracing_single_release_tr,
5004 .write = tracing_trace_options_write,
5005};
5006
5007static const char readme_msg[] =
5008 "tracing mini-HOWTO:\n\n"
5009 "# echo 0 > tracing_on : quick way to disable tracing\n"
5010 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5011 " Important files:\n"
5012 " trace\t\t\t- The static contents of the buffer\n"
5013 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5014 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5015 " current_tracer\t- function and latency tracers\n"
5016 " available_tracers\t- list of configured tracers for current_tracer\n"
5017 " error_log\t- error log for failed commands (that support it)\n"
5018 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5019 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
5021 " local: Per cpu clock but may not be synced across CPUs\n"
5022 " global: Synced across CPUs but slows tracing down.\n"
5023 " counter: Not a clock, but just an increment\n"
5024 " uptime: Jiffy counter from time of boot\n"
5025 " perf: Same clock that perf events use\n"
5026#ifdef CONFIG_X86_64
5027 " x86-tsc: TSC cycle counter\n"
5028#endif
	"\n timestamp_mode\t- view the mode used to timestamp events\n"
5030 " delta: Delta difference against a buffer-wide timestamp\n"
5031 " absolute: Absolute (standalone) timestamp\n"
	"\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
	"\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5034 " tracing_cpumask\t- Limit which CPUs to trace\n"
5035 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5036 "\t\t\t Remove sub-buffer with rmdir\n"
5037 " trace_options\t\t- Set format or modify how tracing happens\n"
5038 "\t\t\t Disable an option by prefixing 'no' to the\n"
5039 "\t\t\t option name\n"
5040 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5041#ifdef CONFIG_DYNAMIC_FTRACE
5042 "\n available_filter_functions - list of functions that can be filtered on\n"
5043 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5044 "\t\t\t functions\n"
5045 "\t accepts: func_full_name or glob-matching-pattern\n"
5046 "\t modules: Can select a group via module\n"
5047 "\t Format: :mod:<module-name>\n"
5048 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5049 "\t triggers: a command to perform when function is hit\n"
5050 "\t Format: <function>:<trigger>[:count]\n"
5051 "\t trigger: traceon, traceoff\n"
5052 "\t\t enable_event:<system>:<event>\n"
5053 "\t\t disable_event:<system>:<event>\n"
5054#ifdef CONFIG_STACKTRACE
5055 "\t\t stacktrace\n"
5056#endif
5057#ifdef CONFIG_TRACER_SNAPSHOT
5058 "\t\t snapshot\n"
5059#endif
5060 "\t\t dump\n"
5061 "\t\t cpudump\n"
5062 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5063 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5064 "\t The first one will disable tracing every time do_fault is hit\n"
5065 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5066 "\t The first time do trap is hit and it disables tracing, the\n"
5067 "\t counter will decrement to 2. If tracing is already disabled,\n"
5068 "\t the counter will not decrement. It only decrements when the\n"
5069 "\t trigger did work\n"
5070 "\t To remove trigger without count:\n"
5071 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5072 "\t To remove trigger with a count:\n"
5073 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5074 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5075 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5076 "\t modules: Can select a group via module command :mod:\n"
5077 "\t Does not accept triggers\n"
5078#endif
5079#ifdef CONFIG_FUNCTION_TRACER
5080 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5081 "\t\t (function)\n"
5082 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5083 "\t\t (function)\n"
5084#endif
5085#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5086 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5087 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5088 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5089#endif
5090#ifdef CONFIG_TRACER_SNAPSHOT
5091 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5092 "\t\t\t snapshot buffer. Read the contents for more\n"
5093 "\t\t\t information\n"
5094#endif
5095#ifdef CONFIG_STACK_TRACER
5096 " stack_trace\t\t- Shows the max stack trace when active\n"
5097 " stack_max_size\t- Shows current max stack size that was traced\n"
5098 "\t\t\t Write into this file to reset the max size (trigger a\n"
5099 "\t\t\t new trace)\n"
5100#ifdef CONFIG_DYNAMIC_FTRACE
5101 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5102 "\t\t\t traces\n"
5103#endif
5104#endif
5105#ifdef CONFIG_DYNAMIC_EVENTS
5106 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5107 "\t\t\t Write into this file to define/undefine new trace events.\n"
5108#endif
5109#ifdef CONFIG_KPROBE_EVENTS
5110 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5111 "\t\t\t Write into this file to define/undefine new trace events.\n"
5112#endif
5113#ifdef CONFIG_UPROBE_EVENTS
5114 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5115 "\t\t\t Write into this file to define/undefine new trace events.\n"
5116#endif
5117#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5118 "\t accepts: event-definitions (one definition per line)\n"
5119 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5120 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5121#ifdef CONFIG_HIST_TRIGGERS
5122 "\t s:[synthetic/]<event> <field> [<field>]\n"
5123#endif
5124 "\t -:[<group>/]<event>\n"
5125#ifdef CONFIG_KPROBE_EVENTS
5126 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5127 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5128#endif
5129#ifdef CONFIG_UPROBE_EVENTS
5130 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
5131#endif
5132 "\t args: <name>=fetcharg[:type]\n"
5133 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5134#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5135 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5136#else
5137 "\t $stack<index>, $stack, $retval, $comm,\n"
5138#endif
5139 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5140 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5141 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5142 "\t <type>\\[<array-size>\\]\n"
5143#ifdef CONFIG_HIST_TRIGGERS
5144 "\t field: <stype> <name>;\n"
5145 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5146 "\t [unsigned] char/int/long\n"
5147#endif
5148#endif
5149 " events/\t\t- Directory containing all trace event subsystems:\n"
5150 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5151 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5152 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5153 "\t\t\t events\n"
5154 " filter\t\t- If set, only events passing filter are traced\n"
5155 " events/<system>/<event>/\t- Directory containing control files for\n"
5156 "\t\t\t <event>:\n"
5157 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5158 " filter\t\t- If set, only events passing filter are traced\n"
5159 " trigger\t\t- If set, a command to perform when event is hit\n"
5160 "\t Format: <trigger>[:count][if <filter>]\n"
5161 "\t trigger: traceon, traceoff\n"
5162 "\t enable_event:<system>:<event>\n"
5163 "\t disable_event:<system>:<event>\n"
5164#ifdef CONFIG_HIST_TRIGGERS
5165 "\t enable_hist:<system>:<event>\n"
5166 "\t disable_hist:<system>:<event>\n"
5167#endif
5168#ifdef CONFIG_STACKTRACE
5169 "\t\t stacktrace\n"
5170#endif
5171#ifdef CONFIG_TRACER_SNAPSHOT
5172 "\t\t snapshot\n"
5173#endif
5174#ifdef CONFIG_HIST_TRIGGERS
5175 "\t\t hist (see below)\n"
5176#endif
5177 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5178 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5179 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5180 "\t events/block/block_unplug/trigger\n"
5181 "\t The first disables tracing every time block_unplug is hit.\n"
5182 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5183 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5184 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5185 "\t Like function triggers, the counter is only decremented if it\n"
5186 "\t enabled or disabled tracing.\n"
5187 "\t To remove a trigger without a count:\n"
5188 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5189 "\t To remove a trigger with a count:\n"
5190 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5191 "\t Filters can be ignored when removing a trigger.\n"
5192#ifdef CONFIG_HIST_TRIGGERS
5193 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5194 "\t Format: hist:keys=<field1[,field2,...]>\n"
5195 "\t [:values=<field1[,field2,...]>]\n"
5196 "\t [:sort=<field1[,field2,...]>]\n"
5197 "\t [:size=#entries]\n"
5198 "\t [:pause][:continue][:clear]\n"
5199 "\t [:name=histname1]\n"
5200 "\t [:<handler>.<action>]\n"
5201 "\t [if <filter>]\n\n"
5202 "\t When a matching event is hit, an entry is added to a hash\n"
5203 "\t table using the key(s) and value(s) named, and the value of a\n"
5204 "\t sum called 'hitcount' is incremented. Keys and values\n"
5205 "\t correspond to fields in the event's format description. Keys\n"
5206 "\t can be any field, or the special string 'stacktrace'.\n"
5207 "\t Compound keys consisting of up to two fields can be specified\n"
5208 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5209 "\t fields. Sort keys consisting of up to two fields can be\n"
5210 "\t specified using the 'sort' keyword. The sort direction can\n"
5211 "\t be modified by appending '.descending' or '.ascending' to a\n"
5212 "\t sort field. The 'size' parameter can be used to specify more\n"
5213 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5214 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5215 "\t its histogram data will be shared with other triggers of the\n"
5216 "\t same name, and trigger hits will update this common data.\n\n"
5217 "\t Reading the 'hist' file for the event will dump the hash\n"
5218 "\t table in its entirety to stdout. If there are multiple hist\n"
5219 "\t triggers attached to an event, there will be a table for each\n"
5220 "\t trigger in the output. The table displayed for a named\n"
5221 "\t trigger will be the same as any other instance having the\n"
5222 "\t same name. The default format used to display a given field\n"
5223 "\t can be modified by appending any of the following modifiers\n"
5224 "\t to the field name, as applicable:\n\n"
5225 "\t .hex display a number as a hex value\n"
5226 "\t .sym display an address as a symbol\n"
5227 "\t .sym-offset display an address as a symbol and offset\n"
5228 "\t .execname display a common_pid as a program name\n"
5229 "\t .syscall display a syscall id as a syscall name\n"
5230 "\t .log2 display log2 value rather than raw number\n"
5231 "\t .usecs display a common_timestamp in microseconds\n\n"
5232 "\t The 'pause' parameter can be used to pause an existing hist\n"
5233 "\t trigger or to start a hist trigger but not log any events\n"
5234 "\t until told to do so. 'continue' can be used to start or\n"
5235 "\t restart a paused hist trigger.\n\n"
5236 "\t The 'clear' parameter will clear the contents of a running\n"
5237 "\t hist trigger and leave its current paused/active state\n"
5238 "\t unchanged.\n\n"
5239 "\t The enable_hist and disable_hist triggers can be used to\n"
5240 "\t have one event conditionally start and stop another event's\n"
5241 "\t already-attached hist trigger. The syntax is analogous to\n"
5242 "\t the enable_event and disable_event triggers.\n\n"
5243 "\t Hist trigger handlers and actions are executed whenever a\n"
5244 "\t a histogram entry is added or updated. They take the form:\n\n"
5245 "\t <handler>.<action>\n\n"
5246 "\t The available handlers are:\n\n"
5247 "\t onmatch(matching.event) - invoke on addition or update\n"
5248 "\t onmax(var) - invoke if var exceeds current max\n"
5249 "\t onchange(var) - invoke action if var changes\n\n"
5250 "\t The available actions are:\n\n"
5251 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5252 "\t save(field,...) - save current event fields\n"
5253#ifdef CONFIG_TRACER_SNAPSHOT
5254 "\t snapshot() - snapshot the trace buffer\n"
5255#endif
5256#endif
5257;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

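/*
 * seq_file interface for the saved_tgids file: one "<pid> <tgid>" pair
 * per line for each pid whose thread group id was recorded (requires
 * the record-tgid trace option).  Illustrative output:
 *
 *   # cat saved_tgids
 *   1024 1024
 *   1025 1024
 */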
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

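/*
 * seq_file interface for the saved_cmdlines file: one "<pid> <comm>"
 * pair per line for each cached pid-to-comm mapping, e.g. "1024 bash"
 * (values are illustrative).
 */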
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

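/*
 * Replace the global savedcmd buffer with one sized for @val entries.
 * The pointer swap is done under trace_cmdline_lock; the old buffer is
 * freed after the lock is dropped.
 */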
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

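/*
 * Write handler for the saved_cmdlines_size file.  A sketch of the
 * userspace interaction (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   # echo 1024 > saved_cmdlines_size    # cache up to 1024 comms
 *   # cat saved_cmdlines_size
 *   1024
 */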
static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry to map pid to comm */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
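/*
 * The eval maps are stored as a linked list of arrays: each array
 * begins with a head item, is followed by the map entries, and ends
 * with a tail item pointing at the next array (see the
 * trace_eval_map_item union defined earlier in this file).
 * update_eval_map() skips the non-map items while iterating for the
 * eval_map seq_file.
 */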
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	(*pos)++;
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * Allocate len + 2 items: the extra two slots hold the head item
	 * (module and length) at the front and the zeroed tail item at the
	 * end that links this array to the next one.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

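/*
 * Register the eval (enum) maps of a module with the core event code,
 * and, when CONFIG_TRACE_EVAL_MAP_FILE is set, also save them for the
 * eval_map file.
 */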
static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE

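/*
 * Resize each CPU buffer of @trace_buf to match the per-cpu entry counts
 * recorded in @size_buf (all CPUs, or just @cpu_id).  Used to keep the
 * snapshot buffer the same size as the main buffer.
 */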
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different size max
			 * buffers!  The max buffer is our "snapshot"
			 * buffer: when a tracer needs a snapshot (one of
			 * the latency tracers), it swaps the max buffer
			 * with the saved snapshot.  We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer, and when
			 * we tried to reset the main buffer to its
			 * original size, that failed too.  This is very
			 * unlikely to happen, but if it does, warn and
			 * kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}

ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				   unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

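/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size.  Once a
 * user starts to use the tracing facility, the buffers need to grow to
 * their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */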
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

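/*
 * tracing_set_tracer - switch @tr to the tracer named @buf.  Expands the
 * ring buffer to its default size on first use, tears down the previous
 * tracer, and initializes the new one.  From userspace this is driven by
 * the current_tracer file, e.g.:
 *
 *   # echo function > current_tracer
 *   # echo nop > current_tracer       # back to the default
 */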
int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->trace_ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

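/*
 * Open handler for trace_pipe.  Each open gets its own iterator; reads
 * consume events from the ring buffer, so concurrent readers each see a
 * disjoint subset of the stream.
 */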
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->array_buffer = &tr->array_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->trace_ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->trace_ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

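/* Must be called with iter->mutex held; drops and retakes it while waiting. */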
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
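
/*
 * Consumer reader.
 */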
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

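/*
 * Render trace entries into iter->seq until the page-sized seq buffer
 * fills or fewer than @rem bytes remain to be spliced.  Returns the
 * number of bytes still wanted by the caller.
 */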
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &default_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

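/*
 * Write handler for the trace_marker file: lets userspace inject
 * annotations into the trace, e.g.:
 *
 *   # echo "hello from userspace" > trace_marker
 *
 * The text appears in the trace output as a print event.  If the copy
 * from userspace faults, the literal string "<faulted>" is logged
 * instead and -EFAULT is returned.
 */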
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1)

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct raw_data_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}

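/*
 * Show the available trace clocks with the active one in brackets,
 * e.g. "[local] global counter uptime perf ...".  Writing one of the
 * names back selects it via tracing_set_clock() below.
 */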
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			   "%s%s%s%s", i ? " " : "",
			   i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			   i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	mutex_lock(&trace_types_lock);

	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
		seq_puts(m, "delta [absolute]\n");
	else
		seq_puts(m, "[delta] absolute\n");

	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

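/*
 * Enable or disable absolute timestamps on @tr's buffers.  The setting
 * is reference counted: only the final disable switches the buffers
 * back to delta timestamps.
 */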
int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (abs && tr->time_stamp_abs_ref++)
		goto out;

	if (!abs) {
		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
			ret = -EINVAL;
			goto out;
		}

		if (--tr->time_stamp_abs_ref)
			goto out;
	}

	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
#endif
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
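/*
 * The snapshot file.  Writing a value controls the snapshot (max)
 * buffer, as implemented by tracing_snapshot_write() below:
 *
 *   echo 0 > snapshot   - free the snapshot buffer (if allocated)
 *   echo 1 > snapshot   - allocate the buffer (if needed) and take a snapshot
 *   echo 2 > snapshot   - clear the snapshot buffer (any value > 1 clears
 *                         without allocating or freeing)
 *
 * Reading the file shows the snapshot's contents, like 'trace'.
 */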
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->array_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	if (ret)
		goto out;

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->array_buffer, iter->cpu_file);
		else
			ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			break;
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id(), NULL);
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	/* The following checks for tracefs lockdown */
	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.array_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

#define TRACING_LOG_ERRS_MAX	8
#define TRACING_LOG_LOC_MAX	128

#define CMD_PREFIX "  Command: "

struct err_info {
	const char	**errs;	/* ptr to loc-specific array of err strings */
	u8		type;	/* index into errs -> specific err string */
	u8		pos;	/* caret position within the command */
	u64		ts;
};

struct tracing_log_err {
	struct list_head	list;
	struct err_info		info;
	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
	struct tracing_log_err *err;

	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
		err = kzalloc(sizeof(*err), GFP_KERNEL);
		if (!err)
			err = ERR_PTR(-ENOMEM);
		tr->n_err_log_entries++;

		return err;
	}

	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
	list_del(&err->list);

	return err;
}

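/**
 * err_pos - find the position of a string within a command for error careting
 * @cmd: The tracing command that caused the error
 * @str: The string to position the caret at within @cmd
 *
 * Finds the position of the first occurrence of @str within @cmd.  The
 * return value can be passed to tracing_log_err() for caret placement
 * within @cmd.
 *
 * Returns the index within @cmd of the first occurrence of @str or 0
 * if @str was not found.
 */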
unsigned int err_pos(char *cmd, const char *str)
{
	char *found;

	if (WARN_ON(!strlen(cmd)))
		return 0;

	found = strstr(cmd, str);
	if (found)
		return found - cmd;

	return 0;
}

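/**
 * tracing_log_err - write an error to the tracing error log
 * @tr: The associated trace array for the error (NULL for top level array)
 * @loc: A string describing where the error occurred
 * @cmd: The tracing command that caused the error
 * @errs: The array of loc-specific static error strings
 * @type: The index into errs[], which produces the specific static err string
 * @pos: The position the caret should be placed in the cmd
 *
 * Writes an error into tracing/error_log of the form:
 *
 *   <loc>: error: <text>
 *     Command: <cmd>
 *                ^
 *
 * tracing/error_log is a small log file containing the last
 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 * unless there has been a tracing error; once the log is full, the
 * oldest entry is reused.
 */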
void tracing_log_err(struct trace_array *tr,
		     const char *loc, const char *cmd,
		     const char **errs, u8 type, u8 pos)
{
	struct tracing_log_err *err;

	if (!tr)
		tr = &global_trace;

	mutex_lock(&tracing_err_log_lock);
	err = get_tracing_log_err(tr);
	if (PTR_ERR(err) == -ENOMEM) {
		mutex_unlock(&tracing_err_log_lock);
		return;
	}

	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);

	err->info.errs = errs;
	err->info.type = type;
	err->info.pos = pos;
	err->info.ts = local_clock();

	list_add_tail(&err->list, &tr->err_log);
	mutex_unlock(&tracing_err_log_lock);
}

static void clear_tracing_err_log(struct trace_array *tr)
{
	struct tracing_log_err *err, *next;

	mutex_lock(&tracing_err_log_lock);
	list_for_each_entry_safe(err, next, &tr->err_log, list) {
		list_del(&err->list);
		kfree(err);
	}

	tr->n_err_log_entries = 0;
	mutex_unlock(&tracing_err_log_lock);
}

static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;

	mutex_lock(&tracing_err_log_lock);

	return seq_list_start(&tr->err_log, *pos);
}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	return seq_list_next(v, &tr->err_log, pos);
}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tracing_err_log_lock);
}

static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
{
	u8 i;

	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
		seq_putc(m, ' ');
	for (i = 0; i < pos; i++)
		seq_putc(m, ' ');
	seq_puts(m, "^\n");
}

7362static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7363{
7364 struct tracing_log_err *err = v;
7365
7366 if (err) {
7367 const char *err_text = err->info.errs[err->info.type];
7368 u64 sec = err->info.ts;
7369 u32 nsec;
7370
7371 nsec = do_div(sec, NSEC_PER_SEC);
7372 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7373 err->loc, err_text);
7374 seq_printf(m, "%s", err->cmd);
7375 tracing_err_log_show_pos(m, err->info.pos);
7376 }
7377
7378 return 0;
7379}
7380
7381static const struct seq_operations tracing_err_log_seq_ops = {
7382 .start = tracing_err_log_seq_start,
7383 .next = tracing_err_log_seq_next,
7384 .stop = tracing_err_log_seq_stop,
7385 .show = tracing_err_log_seq_show
7386};
7387
7388static int tracing_err_log_open(struct inode *inode, struct file *file)
7389{
7390 struct trace_array *tr = inode->i_private;
7391 int ret = 0;
7392
7393 ret = tracing_check_open_get_tr(tr);
7394 if (ret)
7395 return ret;
7396
	/* If this file was opened for write, then erase contents */
7398 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7399 clear_tracing_err_log(tr);
7400
7401 if (file->f_mode & FMODE_READ) {
7402 ret = seq_open(file, &tracing_err_log_seq_ops);
7403 if (!ret) {
7404 struct seq_file *m = file->private_data;
7405 m->private = tr;
7406 } else {
7407 trace_array_put(tr);
7408 }
7409 }
7410 return ret;
7411}
7412
7413static ssize_t tracing_err_log_write(struct file *file,
7414 const char __user *buffer,
7415 size_t count, loff_t *ppos)
7416{
7417 return count;
7418}
7419
7420static int tracing_err_log_release(struct inode *inode, struct file *file)
7421{
7422 struct trace_array *tr = inode->i_private;
7423
7424 trace_array_put(tr);
7425
7426 if (file->f_mode & FMODE_READ)
7427 seq_release(inode, file);
7428
7429 return 0;
7430}
7431
7432static const struct file_operations tracing_err_log_fops = {
7433 .open = tracing_err_log_open,
7434 .write = tracing_err_log_write,
7435 .read = seq_read,
7436 .llseek = seq_lseek,
7437 .release = tracing_err_log_release,
7438};
7439
7440static int tracing_buffers_open(struct inode *inode, struct file *filp)
7441{
7442 struct trace_array *tr = inode->i_private;
7443 struct ftrace_buffer_info *info;
7444 int ret;
7445
7446 ret = tracing_check_open_get_tr(tr);
7447 if (ret)
7448 return ret;
7449
7450 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7451 if (!info) {
7452 trace_array_put(tr);
7453 return -ENOMEM;
7454 }
7455
7456 mutex_lock(&trace_types_lock);
7457
7458 info->iter.tr = tr;
7459 info->iter.cpu_file = tracing_get_cpu(inode);
7460 info->iter.trace = tr->current_trace;
7461 info->iter.array_buffer = &tr->array_buffer;
7462 info->spare = NULL;
	/* Force reading ring buffer for first read */
7464 info->read = (unsigned int)-1;
7465
7466 filp->private_data = info;
7467
7468 tr->trace_ref++;
7469
7470 mutex_unlock(&trace_types_lock);
7471
7472 ret = nonseekable_open(inode, filp);
7473 if (ret < 0)
7474 trace_array_put(tr);
7475
7476 return ret;
7477}
7478
7479static __poll_t
7480tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7481{
7482 struct ftrace_buffer_info *info = filp->private_data;
7483 struct trace_iterator *iter = &info->iter;
7484
7485 return trace_poll(iter, filp, poll_table);
7486}
7487
7488static ssize_t
7489tracing_buffers_read(struct file *filp, char __user *ubuf,
7490 size_t count, loff_t *ppos)
7491{
7492 struct ftrace_buffer_info *info = filp->private_data;
7493 struct trace_iterator *iter = &info->iter;
7494 ssize_t ret = 0;
7495 ssize_t size;
7496
7497 if (!count)
7498 return 0;
7499
7500#ifdef CONFIG_TRACER_MAX_TRACE
7501 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7502 return -EBUSY;
7503#endif
7504
7505 if (!info->spare) {
7506 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7507 iter->cpu_file);
7508 if (IS_ERR(info->spare)) {
7509 ret = PTR_ERR(info->spare);
7510 info->spare = NULL;
7511 } else {
7512 info->spare_cpu = iter->cpu_file;
7513 }
7514 }
7515 if (!info->spare)
7516 return ret;
7517
	/* Do we have previous read data to read? */
7519 if (info->read < PAGE_SIZE)
7520 goto read;
7521
7522 again:
7523 trace_access_lock(iter->cpu_file);
7524 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7525 &info->spare,
7526 count,
7527 iter->cpu_file, 0);
7528 trace_access_unlock(iter->cpu_file);
7529
7530 if (ret < 0) {
7531 if (trace_empty(iter)) {
7532 if ((filp->f_flags & O_NONBLOCK))
7533 return -EAGAIN;
7534
7535 ret = wait_on_pipe(iter, 0);
7536 if (ret)
7537 return ret;
7538
7539 goto again;
7540 }
7541 return 0;
7542 }
7543
7544 info->read = 0;
7545 read:
7546 size = PAGE_SIZE - info->read;
7547 if (size > count)
7548 size = count;
7549
7550 ret = copy_to_user(ubuf, info->spare + info->read, size);
7551 if (ret == size)
7552 return -EFAULT;
7553
7554 size -= ret;
7555
7556 *ppos += size;
7557 info->read += size;
7558
7559 return size;
7560}
7561
7562static int tracing_buffers_release(struct inode *inode, struct file *file)
7563{
7564 struct ftrace_buffer_info *info = file->private_data;
7565 struct trace_iterator *iter = &info->iter;
7566
7567 mutex_lock(&trace_types_lock);
7568
7569 iter->tr->trace_ref--;
7570
7571 __trace_array_put(iter->tr);
7572
7573 if (info->spare)
7574 ring_buffer_free_read_page(iter->array_buffer->buffer,
7575 info->spare_cpu, info->spare);
7576 kvfree(info);
7577
7578 mutex_unlock(&trace_types_lock);
7579
7580 return 0;
7581}
7582
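/*
 * buffer_ref ties one ring-buffer read page to the pipe buffers created by
 * the splice path below: the page is handed to user space zero-copy and is
 * only returned to the ring buffer (via ring_buffer_free_read_page()) once
 * the last pipe reference to it is dropped.
 */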
7583struct buffer_ref {
7584 struct trace_buffer *buffer;
7585 void *page;
7586 int cpu;
7587 refcount_t refcount;
7588};
7589
7590static void buffer_ref_release(struct buffer_ref *ref)
7591{
7592 if (!refcount_dec_and_test(&ref->refcount))
7593 return;
7594 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7595 kfree(ref);
7596}
7597
7598static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7599 struct pipe_buffer *buf)
7600{
7601 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7602
7603 buffer_ref_release(ref);
7604 buf->private = 0;
7605}
7606
7607static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7608 struct pipe_buffer *buf)
7609{
7610 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7611
7612 if (refcount_read(&ref->refcount) > INT_MAX/2)
7613 return false;
7614
7615 refcount_inc(&ref->refcount);
7616 return true;
7617}
7618
/* Pipe buffer operations for a buffer. */
7620static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7621 .release = buffer_pipe_buf_release,
7622 .get = buffer_pipe_buf_get,
7623};
7624
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
7629static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7630{
7631 struct buffer_ref *ref =
7632 (struct buffer_ref *)spd->partial[i].private;
7633
7634 buffer_ref_release(ref);
7635 spd->partial[i].private = 0;
7636}
7637
7638static ssize_t
7639tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7640 struct pipe_inode_info *pipe, size_t len,
7641 unsigned int flags)
7642{
7643 struct ftrace_buffer_info *info = file->private_data;
7644 struct trace_iterator *iter = &info->iter;
7645 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7646 struct page *pages_def[PIPE_DEF_BUFFERS];
7647 struct splice_pipe_desc spd = {
7648 .pages = pages_def,
7649 .partial = partial_def,
7650 .nr_pages_max = PIPE_DEF_BUFFERS,
7651 .ops = &buffer_pipe_buf_ops,
7652 .spd_release = buffer_spd_release,
7653 };
7654 struct buffer_ref *ref;
7655 int entries, i;
7656 ssize_t ret = 0;
7657
7658#ifdef CONFIG_TRACER_MAX_TRACE
7659 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7660 return -EBUSY;
7661#endif
7662
7663 if (*ppos & (PAGE_SIZE - 1))
7664 return -EINVAL;
7665
7666 if (len & (PAGE_SIZE - 1)) {
7667 if (len < PAGE_SIZE)
7668 return -EINVAL;
7669 len &= PAGE_MASK;
7670 }
7671
7672 if (splice_grow_spd(pipe, &spd))
7673 return -ENOMEM;
7674
7675 again:
7676 trace_access_lock(iter->cpu_file);
7677 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7678
7679 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7680 struct page *page;
7681 int r;
7682
7683 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7684 if (!ref) {
7685 ret = -ENOMEM;
7686 break;
7687 }
7688
7689 refcount_set(&ref->refcount, 1);
7690 ref->buffer = iter->array_buffer->buffer;
7691 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7692 if (IS_ERR(ref->page)) {
7693 ret = PTR_ERR(ref->page);
7694 ref->page = NULL;
7695 kfree(ref);
7696 break;
7697 }
7698 ref->cpu = iter->cpu_file;
7699
7700 r = ring_buffer_read_page(ref->buffer, &ref->page,
7701 len, iter->cpu_file, 1);
7702 if (r < 0) {
7703 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7704 ref->page);
7705 kfree(ref);
7706 break;
7707 }
7708
7709 page = virt_to_page(ref->page);
7710
7711 spd.pages[i] = page;
7712 spd.partial[i].len = PAGE_SIZE;
7713 spd.partial[i].offset = 0;
7714 spd.partial[i].private = (unsigned long)ref;
7715 spd.nr_pages++;
7716 *ppos += PAGE_SIZE;
7717
7718 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7719 }
7720
7721 trace_access_unlock(iter->cpu_file);
7722 spd.nr_pages = i;
7723
	/* did we read anything? */
7725 if (!spd.nr_pages) {
7726 if (ret)
7727 goto out;
7728
7729 ret = -EAGAIN;
7730 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7731 goto out;
7732
7733 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7734 if (ret)
7735 goto out;
7736
7737 goto again;
7738 }
7739
7740 ret = splice_to_pipe(pipe, &spd);
7741out:
7742 splice_shrink_spd(&spd);
7743
7744 return ret;
7745}
7746
7747static const struct file_operations tracing_buffers_fops = {
7748 .open = tracing_buffers_open,
7749 .read = tracing_buffers_read,
7750 .poll = tracing_buffers_poll,
7751 .release = tracing_buffers_release,
7752 .splice_read = tracing_buffers_splice_read,
7753 .llseek = no_llseek,
7754};
7755
7756static ssize_t
7757tracing_stats_read(struct file *filp, char __user *ubuf,
7758 size_t count, loff_t *ppos)
7759{
7760 struct inode *inode = file_inode(filp);
7761 struct trace_array *tr = inode->i_private;
7762 struct array_buffer *trace_buf = &tr->array_buffer;
7763 int cpu = tracing_get_cpu(inode);
7764 struct trace_seq *s;
7765 unsigned long cnt;
7766 unsigned long long t;
7767 unsigned long usec_rem;
7768
7769 s = kmalloc(sizeof(*s), GFP_KERNEL);
7770 if (!s)
7771 return -ENOMEM;
7772
7773 trace_seq_init(s);
7774
7775 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7776 trace_seq_printf(s, "entries: %ld\n", cnt);
7777
7778 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7779 trace_seq_printf(s, "overrun: %ld\n", cnt);
7780
7781 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7782 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7783
7784 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7785 trace_seq_printf(s, "bytes: %ld\n", cnt);
7786
7787 if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
7789 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7790 usec_rem = do_div(t, USEC_PER_SEC);
7791 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7792 t, usec_rem);
7793
7794 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7795 usec_rem = do_div(t, USEC_PER_SEC);
7796 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7797 } else {
		/* counter or tsc mode for trace_clock */
7799 trace_seq_printf(s, "oldest event ts: %llu\n",
7800 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7801
7802 trace_seq_printf(s, "now ts: %llu\n",
7803 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7804 }
7805
7806 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7807 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7808
7809 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7810 trace_seq_printf(s, "read events: %ld\n", cnt);
7811
7812 count = simple_read_from_buffer(ubuf, count, ppos,
7813 s->buffer, trace_seq_used(s));
7814
7815 kfree(s);
7816
7817 return count;
7818}
7819
7820static const struct file_operations tracing_stats_fops = {
7821 .open = tracing_open_generic_tr,
7822 .read = tracing_stats_read,
7823 .llseek = generic_file_llseek,
7824 .release = tracing_release_generic_tr,
7825};
7826
7827#ifdef CONFIG_DYNAMIC_FTRACE
7828
7829static ssize_t
7830tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7831 size_t cnt, loff_t *ppos)
7832{
7833 ssize_t ret;
7834 char *buf;
7835 int r;
7836
	/* 256 should be plenty to hold the amount needed */
7838 buf = kmalloc(256, GFP_KERNEL);
7839 if (!buf)
7840 return -ENOMEM;
7841
7842 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7843 ftrace_update_tot_cnt,
7844 ftrace_number_of_pages,
7845 ftrace_number_of_groups);
7846
7847 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7848 kfree(buf);
7849 return ret;
7850}
7851
7852static const struct file_operations tracing_dyn_info_fops = {
7853 .open = tracing_open_generic,
7854 .read = tracing_read_dyn_info,
7855 .llseek = generic_file_llseek,
7856};
7857#endif
7858
7859#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7860static void
7861ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7862 struct trace_array *tr, struct ftrace_probe_ops *ops,
7863 void *data)
7864{
7865 tracing_snapshot_instance(tr);
7866}
7867
7868static void
7869ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7870 struct trace_array *tr, struct ftrace_probe_ops *ops,
7871 void *data)
7872{
7873 struct ftrace_func_mapper *mapper = data;
7874 long *count = NULL;
7875
7876 if (mapper)
7877 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7878
7879 if (count) {
7880
7881 if (*count <= 0)
7882 return;
7883
7884 (*count)--;
7885 }
7886
7887 tracing_snapshot_instance(tr);
7888}
7889
7890static int
7891ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7892 struct ftrace_probe_ops *ops, void *data)
7893{
7894 struct ftrace_func_mapper *mapper = data;
7895 long *count = NULL;
7896
7897 seq_printf(m, "%ps:", (void *)ip);
7898
7899 seq_puts(m, "snapshot");
7900
7901 if (mapper)
7902 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7903
7904 if (count)
7905 seq_printf(m, ":count=%ld\n", *count);
7906 else
7907 seq_puts(m, ":unlimited\n");
7908
7909 return 0;
7910}
7911
7912static int
7913ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7914 unsigned long ip, void *init_data, void **data)
7915{
7916 struct ftrace_func_mapper *mapper = *data;
7917
7918 if (!mapper) {
7919 mapper = allocate_ftrace_func_mapper();
7920 if (!mapper)
7921 return -ENOMEM;
7922 *data = mapper;
7923 }
7924
7925 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7926}
7927
7928static void
7929ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7930 unsigned long ip, void *data)
7931{
7932 struct ftrace_func_mapper *mapper = data;
7933
7934 if (!ip) {
7935 if (!mapper)
7936 return;
7937 free_ftrace_func_mapper(mapper, NULL);
7938 return;
7939 }
7940
7941 ftrace_func_mapper_remove_ip(mapper, ip);
7942}
7943
7944static struct ftrace_probe_ops snapshot_probe_ops = {
7945 .func = ftrace_snapshot,
7946 .print = ftrace_snapshot_print,
7947};
7948
7949static struct ftrace_probe_ops snapshot_count_probe_ops = {
7950 .func = ftrace_count_snapshot,
7951 .print = ftrace_snapshot_print,
7952 .init = ftrace_snapshot_init,
7953 .free = ftrace_snapshot_free,
7954};
7955
7956static int
7957ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7958 char *glob, char *cmd, char *param, int enable)
7959{
7960 struct ftrace_probe_ops *ops;
7961 void *count = (void *)-1;
7962 char *number;
7963 int ret;
7964
7965 if (!tr)
7966 return -ENODEV;
7967
	/* hash funcs only work with set_ftrace_filter */
7969 if (!enable)
7970 return -EINVAL;
7971
7972 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7973
7974 if (glob[0] == '!')
7975 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7976
7977 if (!param)
7978 goto out_reg;
7979
	number = strsep(&param, ":");
7981
7982 if (!strlen(number))
7983 goto out_reg;
7984
	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
7989 ret = kstrtoul(number, 0, (unsigned long *)&count);
7990 if (ret)
7991 return ret;
7992
7993 out_reg:
7994 ret = tracing_alloc_snapshot_instance(tr);
7995 if (ret < 0)
7996 goto out;
7997
7998 ret = register_ftrace_function_probe(glob, tr, ops, count);
7999
8000 out:
8001 return ret < 0 ? ret : 0;
8002}
8003
8004static struct ftrace_func_command ftrace_snapshot_cmd = {
8005 .name = "snapshot",
8006 .func = ftrace_trace_snapshot_callback,
8007};
8008
8009static __init int register_snapshot_cmd(void)
8010{
8011 return register_ftrace_command(&ftrace_snapshot_cmd);
8012}
8013#else
8014static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8016
8017static struct dentry *tracing_get_dentry(struct trace_array *tr)
8018{
8019 if (WARN_ON(!tr->dir))
8020 return ERR_PTR(-ENODEV);
8021
	/* Top directory uses NULL as the parent */
8023 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8024 return NULL;
8025
	/* All sub buffers have a descriptor */
8027 return tr->dir;
8028}
8029
8030static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8031{
8032 struct dentry *d_tracer;
8033
8034 if (tr->percpu_dir)
8035 return tr->percpu_dir;
8036
8037 d_tracer = tracing_get_dentry(tr);
8038 if (IS_ERR(d_tracer))
8039 return NULL;
8040
8041 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8042
8043 MEM_FAIL(!tr->percpu_dir,
8044 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8045
8046 return tr->percpu_dir;
8047}
8048
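/*
 * The cpu number is stashed in the new inode's i_cdev field, offset by one
 * so that cpu 0 is distinguishable from "no cpu set"; tracing_get_cpu()
 * performs the matching decode when one of these per-cpu files is opened.
 */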
8049static struct dentry *
8050trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8051 void *data, long cpu, const struct file_operations *fops)
8052{
8053 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8054
8055 if (ret)
8056 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8057 return ret;
8058}
8059
8060static void
8061tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8062{
8063 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8064 struct dentry *d_cpu;
8065 char cpu_dir[30];
8066
8067 if (!d_percpu)
8068 return;
8069
8070 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8071 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8072 if (!d_cpu) {
8073 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8074 return;
8075 }
8076
	/* per cpu trace_pipe */
8078 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8079 tr, cpu, &tracing_pipe_fops);
8080
	/* per cpu trace */
8082 trace_create_cpu_file("trace", 0644, d_cpu,
8083 tr, cpu, &tracing_fops);
8084
8085 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8086 tr, cpu, &tracing_buffers_fops);
8087
8088 trace_create_cpu_file("stats", 0444, d_cpu,
8089 tr, cpu, &tracing_stats_fops);
8090
8091 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8092 tr, cpu, &tracing_entries_fops);
8093
8094#ifdef CONFIG_TRACER_SNAPSHOT
8095 trace_create_cpu_file("snapshot", 0644, d_cpu,
8096 tr, cpu, &snapshot_fops);
8097
8098 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8099 tr, cpu, &snapshot_raw_fops);
8100#endif
8101}
8102
8103#ifdef CONFIG_FTRACE_SELFTEST
8104
8105#include "trace_selftest.c"
8106#endif
8107
8108static ssize_t
8109trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8110 loff_t *ppos)
8111{
8112 struct trace_option_dentry *topt = filp->private_data;
8113 char *buf;
8114
8115 if (topt->flags->val & topt->opt->bit)
8116 buf = "1\n";
8117 else
8118 buf = "0\n";
8119
8120 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8121}
8122
8123static ssize_t
8124trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8125 loff_t *ppos)
8126{
8127 struct trace_option_dentry *topt = filp->private_data;
8128 unsigned long val;
8129 int ret;
8130
8131 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8132 if (ret)
8133 return ret;
8134
8135 if (val != 0 && val != 1)
8136 return -EINVAL;
8137
8138 if (!!(topt->flags->val & topt->opt->bit) != val) {
8139 mutex_lock(&trace_types_lock);
8140 ret = __set_tracer_option(topt->tr, topt->flags,
8141 topt->opt, !val);
8142 mutex_unlock(&trace_types_lock);
8143 if (ret)
8144 return ret;
8145 }
8146
8147 *ppos += cnt;
8148
8149 return cnt;
8150}
8151
8152
8153static const struct file_operations trace_options_fops = {
8154 .open = tracing_open_generic,
8155 .read = trace_options_read,
8156 .write = trace_options_write,
8157 .llseek = generic_file_llseek,
8158};
8159
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0 and index[5] == 5.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets the trace_array
 * descriptor to use.
 */
8184static void get_tr_index(void *data, struct trace_array **ptr,
8185 unsigned int *pindex)
8186{
8187 *pindex = *(unsigned char *)data;
8188
8189 *ptr = container_of(data - *pindex, struct trace_array,
8190 trace_flags_index);
8191}
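
/*
 * Worked example of the arithmetic above (a sketch, not live code): if
 * tr->trace_flags_index[] starts at address A, then the option file for
 * flag bit 5 was created with data == A + 5 and *data == 5. Therefore
 * *pindex == 5, data - *pindex == A == &tr->trace_flags_index[0], and
 * container_of() on that address recovers tr itself.
 */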
8192
8193static ssize_t
8194trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8195 loff_t *ppos)
8196{
8197 void *tr_index = filp->private_data;
8198 struct trace_array *tr;
8199 unsigned int index;
8200 char *buf;
8201
8202 get_tr_index(tr_index, &tr, &index);
8203
8204 if (tr->trace_flags & (1 << index))
8205 buf = "1\n";
8206 else
8207 buf = "0\n";
8208
8209 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8210}
8211
8212static ssize_t
8213trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8214 loff_t *ppos)
8215{
8216 void *tr_index = filp->private_data;
8217 struct trace_array *tr;
8218 unsigned int index;
8219 unsigned long val;
8220 int ret;
8221
8222 get_tr_index(tr_index, &tr, &index);
8223
8224 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8225 if (ret)
8226 return ret;
8227
8228 if (val != 0 && val != 1)
8229 return -EINVAL;
8230
8231 mutex_lock(&event_mutex);
8232 mutex_lock(&trace_types_lock);
8233 ret = set_tracer_flag(tr, 1 << index, val);
8234 mutex_unlock(&trace_types_lock);
8235 mutex_unlock(&event_mutex);
8236
8237 if (ret < 0)
8238 return ret;
8239
8240 *ppos += cnt;
8241
8242 return cnt;
8243}
8244
8245static const struct file_operations trace_options_core_fops = {
8246 .open = tracing_open_generic,
8247 .read = trace_options_core_read,
8248 .write = trace_options_core_write,
8249 .llseek = generic_file_llseek,
8250};
8251
8252struct dentry *trace_create_file(const char *name,
8253 umode_t mode,
8254 struct dentry *parent,
8255 void *data,
8256 const struct file_operations *fops)
8257{
8258 struct dentry *ret;
8259
8260 ret = tracefs_create_file(name, mode, parent, data, fops);
8261 if (!ret)
8262 pr_warn("Could not create tracefs '%s' entry\n", name);
8263
8264 return ret;
8265}
8266
8267
8268static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8269{
8270 struct dentry *d_tracer;
8271
8272 if (tr->options)
8273 return tr->options;
8274
8275 d_tracer = tracing_get_dentry(tr);
8276 if (IS_ERR(d_tracer))
8277 return NULL;
8278
8279 tr->options = tracefs_create_dir("options", d_tracer);
8280 if (!tr->options) {
8281 pr_warn("Could not create tracefs directory 'options'\n");
8282 return NULL;
8283 }
8284
8285 return tr->options;
8286}
8287
8288static void
8289create_trace_option_file(struct trace_array *tr,
8290 struct trace_option_dentry *topt,
8291 struct tracer_flags *flags,
8292 struct tracer_opt *opt)
8293{
8294 struct dentry *t_options;
8295
8296 t_options = trace_options_init_dentry(tr);
8297 if (!t_options)
8298 return;
8299
8300 topt->flags = flags;
8301 topt->opt = opt;
8302 topt->tr = tr;
8303
8304 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8305 &trace_options_fops);
8306
8307}
8308
8309static void
8310create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8311{
8312 struct trace_option_dentry *topts;
8313 struct trace_options *tr_topts;
8314 struct tracer_flags *flags;
8315 struct tracer_opt *opts;
8316 int cnt;
8317 int i;
8318
8319 if (!tracer)
8320 return;
8321
8322 flags = tracer->flags;
8323
8324 if (!flags || !flags->opts)
8325 return;
8326
	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
8331 if (!trace_ok_for_array(tracer, tr))
8332 return;
8333
8334 for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
8336 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8337 return;
8338 }
8339
8340 opts = flags->opts;
8341
8342 for (cnt = 0; opts[cnt].name; cnt++)
8343 ;
8344
8345 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8346 if (!topts)
8347 return;
8348
8349 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8350 GFP_KERNEL);
8351 if (!tr_topts) {
8352 kfree(topts);
8353 return;
8354 }
8355
8356 tr->topts = tr_topts;
8357 tr->topts[tr->nr_topts].tracer = tracer;
8358 tr->topts[tr->nr_topts].topts = topts;
8359 tr->nr_topts++;
8360
8361 for (cnt = 0; opts[cnt].name; cnt++) {
8362 create_trace_option_file(tr, &topts[cnt], flags,
8363 &opts[cnt]);
8364 MEM_FAIL(topts[cnt].entry == NULL,
8365 "Failed to create trace option: %s",
8366 opts[cnt].name);
8367 }
8368}
8369
8370static struct dentry *
8371create_trace_option_core_file(struct trace_array *tr,
8372 const char *option, long index)
8373{
8374 struct dentry *t_options;
8375
8376 t_options = trace_options_init_dentry(tr);
8377 if (!t_options)
8378 return NULL;
8379
8380 return trace_create_file(option, 0644, t_options,
8381 (void *)&tr->trace_flags_index[index],
8382 &trace_options_core_fops);
8383}
8384
8385static void create_trace_options_dir(struct trace_array *tr)
8386{
8387 struct dentry *t_options;
8388 bool top_level = tr == &global_trace;
8389 int i;
8390
8391 t_options = trace_options_init_dentry(tr);
8392 if (!t_options)
8393 return;
8394
8395 for (i = 0; trace_options[i]; i++) {
8396 if (top_level ||
8397 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8398 create_trace_option_core_file(tr, trace_options[i], i);
8399 }
8400}
8401
8402static ssize_t
8403rb_simple_read(struct file *filp, char __user *ubuf,
8404 size_t cnt, loff_t *ppos)
8405{
8406 struct trace_array *tr = filp->private_data;
8407 char buf[64];
8408 int r;
8409
8410 r = tracer_tracing_is_on(tr);
8411 r = sprintf(buf, "%d\n", r);
8412
8413 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8414}
8415
8416static ssize_t
8417rb_simple_write(struct file *filp, const char __user *ubuf,
8418 size_t cnt, loff_t *ppos)
8419{
8420 struct trace_array *tr = filp->private_data;
8421 struct trace_buffer *buffer = tr->array_buffer.buffer;
8422 unsigned long val;
8423 int ret;
8424
8425 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8426 if (ret)
8427 return ret;
8428
8429 if (buffer) {
8430 mutex_lock(&trace_types_lock);
8431 if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
8433 } else if (val) {
8434 tracer_tracing_on(tr);
8435 if (tr->current_trace->start)
8436 tr->current_trace->start(tr);
8437 } else {
8438 tracer_tracing_off(tr);
8439 if (tr->current_trace->stop)
8440 tr->current_trace->stop(tr);
8441 }
8442 mutex_unlock(&trace_types_lock);
8443 }
8444
8445 (*ppos)++;
8446
8447 return cnt;
8448}
8449
8450static const struct file_operations rb_simple_fops = {
8451 .open = tracing_open_generic_tr,
8452 .read = rb_simple_read,
8453 .write = rb_simple_write,
8454 .release = tracing_release_generic_tr,
8455 .llseek = default_llseek,
8456};
8457
8458static ssize_t
8459buffer_percent_read(struct file *filp, char __user *ubuf,
8460 size_t cnt, loff_t *ppos)
8461{
8462 struct trace_array *tr = filp->private_data;
8463 char buf[64];
8464 int r;
8465
8466 r = tr->buffer_percent;
8467 r = sprintf(buf, "%d\n", r);
8468
8469 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8470}
8471
8472static ssize_t
8473buffer_percent_write(struct file *filp, const char __user *ubuf,
8474 size_t cnt, loff_t *ppos)
8475{
8476 struct trace_array *tr = filp->private_data;
8477 unsigned long val;
8478 int ret;
8479
8480 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8481 if (ret)
8482 return ret;
8483
8484 if (val > 100)
8485 return -EINVAL;
8486
8487 if (!val)
8488 val = 1;
8489
8490 tr->buffer_percent = val;
8491
8492 (*ppos)++;
8493
8494 return cnt;
8495}
8496
8497static const struct file_operations buffer_percent_fops = {
8498 .open = tracing_open_generic_tr,
8499 .read = buffer_percent_read,
8500 .write = buffer_percent_write,
8501 .release = tracing_release_generic_tr,
8502 .llseek = default_llseek,
8503};
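
/*
 * Note: buffer_percent sets how full a per-cpu ring buffer needs to be
 * before a blocked reader is woken; the splice path above passes
 * iter->tr->buffer_percent to wait_on_pipe(). A write of 0 is stored as 1
 * so that waiters still wake up once a minimal amount of data arrives.
 */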
8504
8505static struct dentry *trace_instance_dir;
8506
8507static void
8508init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8509
8510static int
8511allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8512{
8513 enum ring_buffer_flags rb_flags;
8514
8515 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8516
8517 buf->tr = tr;
8518
8519 buf->buffer = ring_buffer_alloc(size, rb_flags);
8520 if (!buf->buffer)
8521 return -ENOMEM;
8522
8523 buf->data = alloc_percpu(struct trace_array_cpu);
8524 if (!buf->data) {
8525 ring_buffer_free(buf->buffer);
8526 buf->buffer = NULL;
8527 return -ENOMEM;
8528 }
8529
	/* Allocate the first page for all buffers */
8531 set_buffer_entries(&tr->array_buffer,
8532 ring_buffer_size(tr->array_buffer.buffer, 0));
8533
8534 return 0;
8535}
8536
8537static int allocate_trace_buffers(struct trace_array *tr, int size)
8538{
8539 int ret;
8540
8541 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8542 if (ret)
8543 return ret;
8544
8545#ifdef CONFIG_TRACER_MAX_TRACE
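	/*
	 * The max (snapshot) buffer only needs to hold real data once a
	 * snapshot swap actually happens, so unless snapshots were requested
	 * on the command line it is allocated at a token minimal size and
	 * grown on demand (see tracing_alloc_snapshot_instance()).
	 */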
8546 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8547 allocate_snapshot ? size : 1);
8548 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8549 ring_buffer_free(tr->array_buffer.buffer);
8550 tr->array_buffer.buffer = NULL;
8551 free_percpu(tr->array_buffer.data);
8552 tr->array_buffer.data = NULL;
8553 return -ENOMEM;
8554 }
8555 tr->allocated_snapshot = allocate_snapshot;
8556
	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
8561 allocate_snapshot = false;
8562#endif
8563
8564 return 0;
8565}
8566
8567static void free_trace_buffer(struct array_buffer *buf)
8568{
8569 if (buf->buffer) {
8570 ring_buffer_free(buf->buffer);
8571 buf->buffer = NULL;
8572 free_percpu(buf->data);
8573 buf->data = NULL;
8574 }
8575}
8576
8577static void free_trace_buffers(struct trace_array *tr)
8578{
8579 if (!tr)
8580 return;
8581
8582 free_trace_buffer(&tr->array_buffer);
8583
8584#ifdef CONFIG_TRACER_MAX_TRACE
8585 free_trace_buffer(&tr->max_buffer);
8586#endif
8587}
8588
8589static void init_trace_flags_index(struct trace_array *tr)
8590{
8591 int i;
8592
	/* Used by the trace options files */
8594 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8595 tr->trace_flags_index[i] = i;
8596}
8597
8598static void __update_tracer_options(struct trace_array *tr)
8599{
8600 struct tracer *t;
8601
8602 for (t = trace_types; t; t = t->next)
8603 add_tracer_options(tr, t);
8604}
8605
8606static void update_tracer_options(struct trace_array *tr)
8607{
8608 mutex_lock(&trace_types_lock);
8609 __update_tracer_options(tr);
8610 mutex_unlock(&trace_types_lock);
8611}
8612
/* Must have trace_types_lock held */
8614struct trace_array *trace_array_find(const char *instance)
8615{
8616 struct trace_array *tr, *found = NULL;
8617
8618 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8619 if (tr->name && strcmp(tr->name, instance) == 0) {
8620 found = tr;
8621 break;
8622 }
8623 }
8624
8625 return found;
8626}
8627
8628struct trace_array *trace_array_find_get(const char *instance)
8629{
8630 struct trace_array *tr;
8631
8632 mutex_lock(&trace_types_lock);
8633 tr = trace_array_find(instance);
8634 if (tr)
8635 tr->ref++;
8636 mutex_unlock(&trace_types_lock);
8637
8638 return tr;
8639}
8640
8641static struct trace_array *trace_array_create(const char *name)
8642{
8643 struct trace_array *tr;
8644 int ret;
8645
8646 ret = -ENOMEM;
8647 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8648 if (!tr)
8649 return ERR_PTR(ret);
8650
8651 tr->name = kstrdup(name, GFP_KERNEL);
8652 if (!tr->name)
8653 goto out_free_tr;
8654
8655 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8656 goto out_free_tr;
8657
8658 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8659
8660 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8661
8662 raw_spin_lock_init(&tr->start_lock);
8663
8664 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8665
8666 tr->current_trace = &nop_trace;
8667
8668 INIT_LIST_HEAD(&tr->systems);
8669 INIT_LIST_HEAD(&tr->events);
8670 INIT_LIST_HEAD(&tr->hist_vars);
8671 INIT_LIST_HEAD(&tr->err_log);
8672
8673 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8674 goto out_free_tr;
8675
8676 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8677 if (!tr->dir)
8678 goto out_free_tr;
8679
8680 ret = event_trace_add_tracer(tr->dir, tr);
8681 if (ret) {
8682 tracefs_remove(tr->dir);
8683 goto out_free_tr;
8684 }
8685
8686 ftrace_init_trace_array(tr);
8687
8688 init_tracer_tracefs(tr, tr->dir);
8689 init_trace_flags_index(tr);
8690 __update_tracer_options(tr);
8691
8692 list_add(&tr->list, &ftrace_trace_arrays);
8693
8694 tr->ref++;
8695
8696
8697 return tr;
8698
8699 out_free_tr:
8700 free_trace_buffers(tr);
8701 free_cpumask_var(tr->tracing_cpumask);
8702 kfree(tr->name);
8703 kfree(tr);
8704
8705 return ERR_PTR(ret);
8706}
8707
8708static int instance_mkdir(const char *name)
8709{
8710 struct trace_array *tr;
8711 int ret;
8712
8713 mutex_lock(&event_mutex);
8714 mutex_lock(&trace_types_lock);
8715
8716 ret = -EEXIST;
8717 if (trace_array_find(name))
8718 goto out_unlock;
8719
8720 tr = trace_array_create(name);
8721
8722 ret = PTR_ERR_OR_ZERO(tr);
8723
8724out_unlock:
8725 mutex_unlock(&trace_types_lock);
8726 mutex_unlock(&event_mutex);
8727 return ret;
8728}
8729
/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 *
 * Returns pointer to trace array with given name.
 * NULL, if it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes the caller have to special care in terms
 * of closing out the instance when it is done with it. The trace_array_put()
 * on the returned trace array must be done when its usage is done.
 */
8746struct trace_array *trace_array_get_by_name(const char *name)
8747{
8748 struct trace_array *tr;
8749
8750 mutex_lock(&event_mutex);
8751 mutex_lock(&trace_types_lock);
8752
8753 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8754 if (tr->name && strcmp(tr->name, name) == 0)
8755 goto out_unlock;
8756 }
8757
8758 tr = trace_array_create(name);
8759
8760 if (IS_ERR(tr))
8761 tr = NULL;
8762out_unlock:
8763 if (tr)
8764 tr->ref++;
8765
8766 mutex_unlock(&trace_types_lock);
8767 mutex_unlock(&event_mutex);
8768 return tr;
8769}
8770EXPORT_SYMBOL_GPL(trace_array_get_by_name);
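
/*
 * Usage sketch (hypothetical module code; the instance name is made up):
 * create or look up a named instance, use it, then drop the reference
 * taken on the caller's behalf above, e.g. by writing into it with
 * trace_array_printk():
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 *
 * trace_array_put() only drops the reference; trace_array_destroy()
 * below removes the instance entirely.
 */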
8771
8772static int __remove_instance(struct trace_array *tr)
8773{
8774 int i;
8775
	/* Reference counter for a newly created trace array = 1. */
8777 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8778 return -EBUSY;
8779
8780 list_del(&tr->list);
8781
	/* Disable all the flags that were enabled coming in */
8783 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8784 if ((1 << i) & ZEROED_TRACE_FLAGS)
8785 set_tracer_flag(tr, 1 << i, 0);
8786 }
8787
8788 tracing_set_nop(tr);
8789 clear_ftrace_function_probes(tr);
8790 event_trace_del_tracer(tr);
8791 ftrace_clear_pids(tr);
8792 ftrace_destroy_function_files(tr);
8793 tracefs_remove(tr->dir);
8794 free_trace_buffers(tr);
8795
8796 for (i = 0; i < tr->nr_topts; i++) {
8797 kfree(tr->topts[i].topts);
8798 }
8799 kfree(tr->topts);
8800
8801 free_cpumask_var(tr->tracing_cpumask);
8802 kfree(tr->name);
8803 kfree(tr);
8804 tr = NULL;
8805
8806 return 0;
8807}
8808
8809int trace_array_destroy(struct trace_array *this_tr)
8810{
8811 struct trace_array *tr;
8812 int ret;
8813
8814 if (!this_tr)
8815 return -EINVAL;
8816
8817 mutex_lock(&event_mutex);
8818 mutex_lock(&trace_types_lock);
8819
8820 ret = -ENODEV;
8821
	/* Making sure trace array exists before destroying it. */
8823 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8824 if (tr == this_tr) {
8825 ret = __remove_instance(tr);
8826 break;
8827 }
8828 }
8829
8830 mutex_unlock(&trace_types_lock);
8831 mutex_unlock(&event_mutex);
8832
8833 return ret;
8834}
8835EXPORT_SYMBOL_GPL(trace_array_destroy);
8836
8837static int instance_rmdir(const char *name)
8838{
8839 struct trace_array *tr;
8840 int ret;
8841
8842 mutex_lock(&event_mutex);
8843 mutex_lock(&trace_types_lock);
8844
8845 ret = -ENODEV;
8846 tr = trace_array_find(name);
8847 if (tr)
8848 ret = __remove_instance(tr);
8849
8850 mutex_unlock(&trace_types_lock);
8851 mutex_unlock(&event_mutex);
8852
8853 return ret;
8854}
8855
8856static __init void create_trace_instances(struct dentry *d_tracer)
8857{
8858 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8859 instance_mkdir,
8860 instance_rmdir);
8861 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8862 return;
8863}
8864
8865static void
8866init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8867{
8868 struct trace_event_file *file;
8869 int cpu;
8870
8871 trace_create_file("available_tracers", 0444, d_tracer,
8872 tr, &show_traces_fops);
8873
8874 trace_create_file("current_tracer", 0644, d_tracer,
8875 tr, &set_tracer_fops);
8876
8877 trace_create_file("tracing_cpumask", 0644, d_tracer,
8878 tr, &tracing_cpumask_fops);
8879
8880 trace_create_file("trace_options", 0644, d_tracer,
8881 tr, &tracing_iter_fops);
8882
8883 trace_create_file("trace", 0644, d_tracer,
8884 tr, &tracing_fops);
8885
8886 trace_create_file("trace_pipe", 0444, d_tracer,
8887 tr, &tracing_pipe_fops);
8888
8889 trace_create_file("buffer_size_kb", 0644, d_tracer,
8890 tr, &tracing_entries_fops);
8891
8892 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8893 tr, &tracing_total_entries_fops);
8894
8895 trace_create_file("free_buffer", 0200, d_tracer,
8896 tr, &tracing_free_buffer_fops);
8897
8898 trace_create_file("trace_marker", 0220, d_tracer,
8899 tr, &tracing_mark_fops);
8900
8901 file = __find_event_file(tr, "ftrace", "print");
8902 if (file && file->dir)
8903 trace_create_file("trigger", 0644, file->dir, file,
8904 &event_trigger_fops);
8905 tr->trace_marker_file = file;
8906
8907 trace_create_file("trace_marker_raw", 0220, d_tracer,
8908 tr, &tracing_mark_raw_fops);
8909
8910 trace_create_file("trace_clock", 0644, d_tracer, tr,
8911 &trace_clock_fops);
8912
8913 trace_create_file("tracing_on", 0644, d_tracer,
8914 tr, &rb_simple_fops);
8915
8916 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8917 &trace_time_stamp_mode_fops);
8918
8919 tr->buffer_percent = 50;
8920
8921 trace_create_file("buffer_percent", 0444, d_tracer,
8922 tr, &buffer_percent_fops);
8923
8924 create_trace_options_dir(tr);
8925
8926#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8927 trace_create_maxlat_file(tr, d_tracer);
8928#endif
8929
8930 if (ftrace_create_function_files(tr, d_tracer))
8931 MEM_FAIL(1, "Could not allocate function filter files");
8932
8933#ifdef CONFIG_TRACER_SNAPSHOT
8934 trace_create_file("snapshot", 0644, d_tracer,
8935 tr, &snapshot_fops);
8936#endif
8937
8938 trace_create_file("error_log", 0644, d_tracer,
8939 tr, &tracing_err_log_fops);
8940
8941 for_each_tracing_cpu(cpu)
8942 tracing_init_tracefs_percpu(tr, cpu);
8943
8944 ftrace_init_tracefs(tr, d_tracer);
8945}
8946
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8948{
8949 struct vfsmount *mnt;
8950 struct file_system_type *type;
8951
	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
8957 type = get_fs_type("tracefs");
8958 if (!type)
8959 return NULL;
8960 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8961 put_filesystem(type);
8962 if (IS_ERR(mnt))
8963 return NULL;
8964 mntget(mnt);
8965
8966 return mnt;
8967}
8968
8969
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
8976struct dentry *tracing_init_dentry(void)
8977{
8978 struct trace_array *tr = &global_trace;
8979
8980 if (security_locked_down(LOCKDOWN_TRACEFS)) {
8981 pr_warn("Tracing disabled due to lockdown\n");
8982 return ERR_PTR(-EPERM);
8983 }
8984
	/* The top level trace array uses NULL as parent */
8986 if (tr->dir)
8987 return NULL;
8988
8989 if (WARN_ON(!tracefs_initialized()))
8990 return ERR_PTR(-ENODEV);
8991
	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
8998 tr->dir = debugfs_create_automount("tracing", NULL,
8999 trace_automount, NULL);
9000
9001 return NULL;
9002}
9003
9004extern struct trace_eval_map *__start_ftrace_eval_maps[];
9005extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9006
9007static void __init trace_eval_init(void)
9008{
9009 int len;
9010
9011 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9012 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9013}
9014
9015#ifdef CONFIG_MODULES
9016static void trace_module_add_evals(struct module *mod)
9017{
9018 if (!mod->num_trace_evals)
9019 return;
9020
	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
9025 if (trace_module_has_bad_taint(mod))
9026 return;
9027
9028 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9029}
9030
9031#ifdef CONFIG_TRACE_EVAL_MAP_FILE
9032static void trace_module_remove_evals(struct module *mod)
9033{
9034 union trace_eval_map_item *map;
9035 union trace_eval_map_item **last = &trace_eval_maps;
9036
9037 if (!mod->num_trace_evals)
9038 return;
9039
9040 mutex_lock(&trace_eval_mutex);
9041
9042 map = trace_eval_maps;
9043
9044 while (map) {
9045 if (map->head.mod == mod)
9046 break;
9047 map = trace_eval_jmp_to_tail(map);
9048 last = &map->tail.next;
9049 map = map->tail.next;
9050 }
9051 if (!map)
9052 goto out;
9053
9054 *last = trace_eval_jmp_to_tail(map)->tail.next;
9055 kfree(map);
9056 out:
9057 mutex_unlock(&trace_eval_mutex);
9058}
9059#else
9060static inline void trace_module_remove_evals(struct module *mod) { }
9061#endif
9062
9063static int trace_module_notify(struct notifier_block *self,
9064 unsigned long val, void *data)
9065{
9066 struct module *mod = data;
9067
9068 switch (val) {
9069 case MODULE_STATE_COMING:
9070 trace_module_add_evals(mod);
9071 break;
9072 case MODULE_STATE_GOING:
9073 trace_module_remove_evals(mod);
9074 break;
9075 }
9076
9077 return 0;
9078}
9079
9080static struct notifier_block trace_module_nb = {
9081 .notifier_call = trace_module_notify,
9082 .priority = 0,
9083};
#endif /* CONFIG_MODULES */
9085
9086static __init int tracer_init_tracefs(void)
9087{
9088 struct dentry *d_tracer;
9089
9090 trace_access_lock_init();
9091
9092 d_tracer = tracing_init_dentry();
9093 if (IS_ERR(d_tracer))
9094 return 0;
9095
9096 event_trace_init();
9097
9098 init_tracer_tracefs(&global_trace, d_tracer);
9099 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
9100
9101 trace_create_file("tracing_thresh", 0644, d_tracer,
9102 &global_trace, &tracing_thresh_fops);
9103
9104 trace_create_file("README", 0444, d_tracer,
9105 NULL, &tracing_readme_fops);
9106
9107 trace_create_file("saved_cmdlines", 0444, d_tracer,
9108 NULL, &tracing_saved_cmdlines_fops);
9109
9110 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9111 NULL, &tracing_saved_cmdlines_size_fops);
9112
9113 trace_create_file("saved_tgids", 0444, d_tracer,
9114 NULL, &tracing_saved_tgids_fops);
9115
9116 trace_eval_init();
9117
9118 trace_create_eval_file(d_tracer);
9119
9120#ifdef CONFIG_MODULES
9121 register_module_notifier(&trace_module_nb);
9122#endif
9123
9124#ifdef CONFIG_DYNAMIC_FTRACE
9125 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
9126 NULL, &tracing_dyn_info_fops);
9127#endif
9128
9129 create_trace_instances(d_tracer);
9130
9131 update_tracer_options(&global_trace);
9132
9133 return 0;
9134}
9135
9136static int trace_panic_handler(struct notifier_block *this,
9137 unsigned long event, void *unused)
9138{
9139 if (ftrace_dump_on_oops)
9140 ftrace_dump(ftrace_dump_on_oops);
9141 return NOTIFY_OK;
9142}
9143
9144static struct notifier_block trace_panic_notifier = {
9145 .notifier_call = trace_panic_handler,
9146 .next = NULL,
9147 .priority = 150
9148};
9149
9150static int trace_die_handler(struct notifier_block *self,
9151 unsigned long val,
9152 void *data)
9153{
9154 switch (val) {
9155 case DIE_OOPS:
9156 if (ftrace_dump_on_oops)
9157 ftrace_dump(ftrace_dump_on_oops);
9158 break;
9159 default:
9160 break;
9161 }
9162 return NOTIFY_OK;
9163}
9164
9165static struct notifier_block trace_die_notifier = {
9166 .notifier_call = trace_die_handler,
9167 .priority = 200
9168};
9169
/*
 * printk is set to max of 1024, we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
9174#define TRACE_MAX_PRINT 1000
9175
/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
9181#define KERN_TRACE KERN_EMERG
9182
9183void
9184trace_printk_seq(struct trace_seq *s)
9185{
	/* Probably should print a warning here. */
9187 if (s->seq.len >= TRACE_MAX_PRINT)
9188 s->seq.len = TRACE_MAX_PRINT;
9189
	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
9195 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9196 s->seq.len = s->seq.size - 1;
9197
	/* should be zero ended, but we are paranoid. */
9199 s->buffer[s->seq.len] = 0;
9200
9201 printk(KERN_TRACE "%s", s->buffer);
9202
9203 trace_seq_init(s);
9204}
9205
9206void trace_init_global_iter(struct trace_iterator *iter)
9207{
9208 iter->tr = &global_trace;
9209 iter->trace = iter->tr->current_trace;
9210 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9211 iter->array_buffer = &global_trace.array_buffer;
9212
9213 if (iter->trace && iter->trace->open)
9214 iter->trace->open(iter);
9215
	/* Annotate cpu buffers if we have overruns. */
9217 if (ring_buffer_overruns(iter->array_buffer->buffer))
9218 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9219
	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
9221 if (trace_clocks[iter->tr->clock_id].in_ns)
9222 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9223}
9224
9225void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9226{
	/* use static because iter can be a bit big for the stack */
9228 static struct trace_iterator iter;
9229 static atomic_t dump_running;
9230 struct trace_array *tr = &global_trace;
9231 unsigned int old_userobj;
9232 unsigned long flags;
9233 int cnt = 0, cpu;
9234
	/* Only allow one dump user at a time. */
9236 if (atomic_inc_return(&dump_running) != 1) {
9237 atomic_dec(&dump_running);
9238 return;
9239 }
9240
	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
9249 tracing_off();
9250
9251 local_irq_save(flags);
9252 printk_nmi_direct_enter();
9253
	/* Simulate the iterator */
9255 trace_init_global_iter(&iter);
	/* Can not use kmalloc for iter.temp */
9257 iter.temp = static_temp_buf;
9258 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9259
9260 for_each_tracing_cpu(cpu) {
9261 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9262 }
9263
9264 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9265
	/* don't look at user memory in panic mode */
9267 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9268
9269 switch (oops_dump_mode) {
9270 case DUMP_ALL:
9271 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9272 break;
9273 case DUMP_ORIG:
9274 iter.cpu_file = raw_smp_processor_id();
9275 break;
9276 case DUMP_NONE:
9277 goto out_enable;
9278 default:
9279 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9280 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9281 }
9282
9283 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9284
	/* Did function tracer already get disabled? */
9286 if (ftrace_is_dead()) {
9287 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9288 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9289 }
9290
9291
	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */
9298 while (!trace_empty(&iter)) {
9299
9300 if (!cnt)
9301 printk(KERN_TRACE "---------------------------------\n");
9302
9303 cnt++;
		/* reset all but tr, trace, and overruns */
9305 trace_iterator_reset(&iter);
9306 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9307
9308 if (trace_find_next_entry_inc(&iter) != NULL) {
9309 int ret;
9310
9311 ret = print_trace_line(&iter);
9312 if (ret != TRACE_TYPE_NO_CONSUME)
9313 trace_consume(&iter);
9314 }
9315 touch_nmi_watchdog();
9316
9317 trace_printk_seq(&iter.seq);
9318 }
9319
9320 if (!cnt)
9321 printk(KERN_TRACE " (ftrace buffer empty)\n");
9322 else
9323 printk(KERN_TRACE "---------------------------------\n");
9324
9325 out_enable:
9326 tr->trace_flags |= old_userobj;
9327
9328 for_each_tracing_cpu(cpu) {
9329 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9330 }
9331 atomic_dec(&dump_running);
9332 printk_nmi_direct_exit();
9333 local_irq_restore(flags);
9334}
9335EXPORT_SYMBOL_GPL(ftrace_dump);
9336
9337int trace_run_command(const char *buf, int (*createfn)(int, char **))
9338{
9339 char **argv;
9340 int argc, ret;
9341
9342 argc = 0;
9343 ret = 0;
9344 argv = argv_split(GFP_KERNEL, buf, &argc);
9345 if (!argv)
9346 return -ENOMEM;
9347
9348 if (argc)
9349 ret = createfn(argc, argv);
9350
9351 argv_free(argv);
9352
9353 return ret;
9354}
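
/*
 * Example (hypothetical callback): trace_run_command("p:my_probe do_sys_open",
 * fn) hands fn argc == 2 and argv == { "p:my_probe", "do_sys_open" }. A
 * minimal createfn might look like:
 *
 *	static int my_createfn(int argc, char **argv)
 *	{
 *		if (argc < 2)
 *			return -EINVAL;
 *		pr_info("cmd %s target %s\n", argv[0], argv[1]);
 *		return 0;
 *	}
 */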
9355
9356#define WRITE_BUFSIZE 4096
9357
9358ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9359 size_t count, loff_t *ppos,
9360 int (*createfn)(int, char **))
9361{
9362 char *kbuf, *buf, *tmp;
9363 int ret = 0;
9364 size_t done = 0;
9365 size_t size;
9366
9367 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9368 if (!kbuf)
9369 return -ENOMEM;
9370
9371 while (done < count) {
9372 size = count - done;
9373
9374 if (size >= WRITE_BUFSIZE)
9375 size = WRITE_BUFSIZE - 1;
9376
9377 if (copy_from_user(kbuf, buffer + done, size)) {
9378 ret = -EFAULT;
9379 goto out;
9380 }
9381 kbuf[size] = '\0';
9382 buf = kbuf;
9383 do {
9384 tmp = strchr(buf, '\n');
9385 if (tmp) {
9386 *tmp = '\0';
9387 size = tmp - buf + 1;
9388 } else {
9389 size = strlen(buf);
9390 if (done + size < count) {
9391 if (buf != kbuf)
9392 break;
9393
9394 pr_warn("Line length is too long: Should be less than %d\n",
9395 WRITE_BUFSIZE - 2);
9396 ret = -EINVAL;
9397 goto out;
9398 }
9399 }
9400 done += size;
9401
		/* Remove comments */
9403 tmp = strchr(buf, '#');
9404
9405 if (tmp)
9406 *tmp = '\0';
9407
9408 ret = trace_run_command(buf, createfn);
9409 if (ret)
9410 goto out;
9411 buf += size;
9412
9413 } while (done < count);
9414 }
9415 ret = done;
9416
9417out:
9418 kfree(kbuf);
9419
9420 return ret;
9421}
9422
9423__init static int tracer_alloc_buffers(void)
9424{
9425 int ring_buf_size;
9426 int ret = -ENOMEM;
9427
9428
9429 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9430 pr_warn("Tracing disabled due to lockdown\n");
9431 return -EPERM;
9432 }
9433
	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
9438 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9439
9440 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9441 goto out;
9442
9443 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9444 goto out_free_buffer_mask;
9445
	/* Only allocate trace_printk buffers if a trace_printk exists */
9447 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
9449 trace_printk_init_buffers();
9450
	/* To save memory, keep the ring buffer size to its minimum */
9452 if (ring_buffer_expanded)
9453 ring_buf_size = trace_buf_size;
9454 else
9455 ring_buf_size = 1;
9456
9457 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9458 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9459
9460 raw_spin_lock_init(&global_trace.start_lock);
9461
	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
9468 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9469 "trace/RB:preapre", trace_rb_cpu_prepare,
9470 NULL);
9471 if (ret < 0)
9472 goto out_free_cpumask;
9473
9474 ret = -ENOMEM;
9475 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9476 if (!temp_buffer)
9477 goto out_rm_hp_state;
9478
9479 if (trace_create_savedcmd() < 0)
9480 goto out_free_temp_buffer;
9481
	/* TODO: make the number of buffers hot pluggable with CPUS */
9483 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9484 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9485 goto out_free_savedcmd;
9486 }
9487
9488 if (global_trace.buffer_disabled)
9489 tracing_off();
9490
9491 if (trace_boot_clock) {
9492 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9493 if (ret < 0)
9494 pr_warn("Trace clock %s not defined, going back to default\n",
9495 trace_boot_clock);
9496 }
9497
	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
9503 global_trace.current_trace = &nop_trace;
9504
9505 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9506
9507 ftrace_init_global_array_ops(&global_trace);
9508
9509 init_trace_flags_index(&global_trace);
9510
9511 register_tracer(&nop_trace);
9512
	/* Function tracing may start here (via kernel command line) */
9514 init_function_trace();
9515
	/* All seems OK, enable tracing */
9517 tracing_disabled = 0;
9518
9519 atomic_notifier_chain_register(&panic_notifier_list,
9520 &trace_panic_notifier);
9521
9522 register_die_notifier(&trace_die_notifier);
9523
9524 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9525
9526 INIT_LIST_HEAD(&global_trace.systems);
9527 INIT_LIST_HEAD(&global_trace.events);
9528 INIT_LIST_HEAD(&global_trace.hist_vars);
9529 INIT_LIST_HEAD(&global_trace.err_log);
9530 list_add(&global_trace.list, &ftrace_trace_arrays);
9531
9532 apply_trace_boot_options();
9533
9534 register_snapshot_cmd();
9535
9536 return 0;
9537
9538out_free_savedcmd:
9539 free_saved_cmdlines_buffer(savedcmd);
9540out_free_temp_buffer:
9541 ring_buffer_free(temp_buffer);
9542out_rm_hp_state:
9543 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9544out_free_cpumask:
9545 free_cpumask_var(global_trace.tracing_cpumask);
9546out_free_buffer_mask:
9547 free_cpumask_var(tracing_buffer_mask);
9548out:
9549 return ret;
9550}
9551
9552void __init early_trace_init(void)
9553{
9554 if (tracepoint_printk) {
9555 tracepoint_print_iter =
9556 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9557 if (MEM_FAIL(!tracepoint_print_iter,
9558 "Failed to allocate trace iterator\n"))
9559 tracepoint_printk = 0;
9560 else
9561 static_key_enable(&tracepoint_printk_key.key);
9562 }
9563 tracer_alloc_buffers();
9564}
9565
9566void __init trace_init(void)
9567{
9568 trace_event_init();
9569}
9570
9571__init static int clear_boot_tracer(void)
9572{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
9580 if (!default_bootup_tracer)
9581 return 0;
9582
9583 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9584 default_bootup_tracer);
9585 default_bootup_tracer = NULL;
9586
9587 return 0;
9588}
9589
9590fs_initcall(tracer_init_tracefs);
9591late_initcall_sync(clear_boot_tracer);
9592
9593#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9594__init static int tracing_set_default_clock(void)
9595{
	/* sched_clock_stable() is determined in late_initcall */
9597 if (!trace_boot_clock && !sched_clock_stable()) {
9598 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9599 pr_warn("Can not set tracing clock due to lockdown\n");
9600 return -EPERM;
9601 }
9602
9603 printk(KERN_WARNING
9604 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9605 "If you want to keep using the local clock, then add:\n"
9606 " \"trace_clock=local\"\n"
9607 "on the kernel command line\n");
9608 tracing_set_clock(&global_trace, "global");
9609 }
9610
9611 return 0;
9612}
9613late_initcall_sync(tracing_set_default_clock);
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
9615