1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/tracefs.h>
24#include <linux/pagemap.h>
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
28#include <linux/kprobes.h>
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
32#include <linux/splice.h>
33#include <linux/kdebug.h>
34#include <linux/string.h>
35#include <linux/mount.h>
36#include <linux/rwsem.h>
37#include <linux/slab.h>
38#include <linux/ctype.h>
39#include <linux/init.h>
40#include <linux/poll.h>
41#include <linux/nmi.h>
42#include <linux/fs.h>
43#include <linux/sched/rt.h>
44
45#include "trace.h"
46#include "trace_output.h"
47
48
49
50
51
/*
 * Set once the ring buffers have been expanded to their requested size
 * (they start out at a minimal size until tracing is actually used).
 */
bool ring_buffer_expanded;

/* True while a tracer self-test is executing; suppresses normal output. */
static bool __read_mostly tracing_selftest_running;

/* Set at boot to permanently skip tracer self-tests. */
bool __read_mostly tracing_selftest_disabled;

/* Iterator used when tracepoints are printed directly via printk. */
struct trace_iterator *tracepoint_print_iter;
/* Non-zero when the "tp_printk" boot option is in effect. */
int tracepoint_printk;
71
72
/* Empty option table for tracers that declare no options of their own. */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};
76
/*
 * Fallback set_flag() callback for tracers that do not provide one:
 * accept any flag change and report success.
 */
static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
82
83
84
85
86
87
/*
 * Per-CPU flag: set when an event is committed on this CPU, telling
 * tracing_record_cmdline() that a pid<->comm mapping may need saving.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill switch for the whole tracing facility.  Starts disabled (1) and
 * is cleared once early init succeeds; checked on fast paths before
 * any recording is attempted.
 */
static int tracing_disabled = 1;

/* Mask of CPUs that have tracing buffers. */
cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * Controls whether (and how much of) the trace buffers are dumped to
 * the console on an oops; set via the "ftrace_dump_on_oops" boot
 * parameter (see set_ftrace_dump_on_oops()).
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, a kernel WARN*() turns tracing off ("traceoff_on_warning"). */
int __disable_trace_on_warning;
120
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * Bookkeeping for saved enum maps.  All modules' maps are chained as
 * arrays of union trace_enum_map_item; each module's run of map entries
 * is bracketed by a "head" item (owning module + entry count) and a
 * "tail" item linking to the next module's run.
 */
struct trace_enum_map_head {
	struct module *mod;
	unsigned long length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * Overlays a map entry in the union below: "next" links to the
	 * next module's item array (or NULL at the end of the chain).
	 */
	union trace_enum_map_item *next;
	const char *end;
};

/* Serializes all updates to the saved enum maps. */
static DEFINE_MUTEX(trace_enum_mutex);

/*
 * An item is either a real enum mapping or the head/tail marker of a
 * per-module run of mappings (see above).
 */
union trace_enum_map_item {
	struct trace_enum_map map;
	struct trace_enum_map_head head;
	struct trace_enum_map_tail tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif
156
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

/* Maximum length of a tracer's name (including the terminating NUL). */
#define MAX_TRACER_SIZE 100
/* Tracer name requested on the command line via "ftrace=". */
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

/* Set by the "alloc_snapshot" boot parameter. */
static bool allocate_snapshot;
164
/* "ftrace=<tracer>" boot parameter: remember the tracer to start with. */
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
174
175static int __init set_ftrace_dump_on_oops(char *str)
176{
177 if (*str++ != '=' || !*str) {
178 ftrace_dump_on_oops = DUMP_ALL;
179 return 1;
180 }
181
182 if (!strcmp("orig_cpu", str)) {
183 ftrace_dump_on_oops = DUMP_ORIG;
184 return 1;
185 }
186
187 return 0;
188}
189__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
190
191static int __init stop_trace_on_warning(char *str)
192{
193 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
194 __disable_trace_on_warning = 1;
195 return 1;
196}
197__setup("traceoff_on_warning", stop_trace_on_warning);
198
199static int __init boot_alloc_snapshot(char *str)
200{
201 allocate_snapshot = true;
202
203 ring_buffer_expanded = true;
204 return 1;
205}
206__setup("alloc_snapshot", boot_alloc_snapshot);
207
208
209static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
210
211static int __init set_trace_boot_options(char *str)
212{
213 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
214 return 0;
215}
216__setup("trace_options=", set_trace_boot_options);
217
218static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
219static char *trace_boot_clock __initdata;
220
221static int __init set_trace_boot_clock(char *str)
222{
223 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
224 trace_boot_clock = trace_boot_clock_buf;
225 return 0;
226}
227__setup("trace_clock=", set_trace_boot_clock);
228
229static int __init set_tracepoint_printk(char *str)
230{
231 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
232 tracepoint_printk = 1;
233 return 1;
234}
235__setup("tp_printk", set_tracepoint_printk);
236
/*
 * Convert nanoseconds to microseconds, rounding to nearest.
 * do_div() is used because native 64-bit division is not available on
 * all 32-bit architectures; it divides @nsec in place.
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
243
244
/* trace_flags that are default for every trace instance */
#define TRACE_DEFAULT_FLAGS \
	(FUNCTION_DEFAULT_FLAGS | \
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
255
256
257
258
259
260
261
262
263
264
265
266
267
268
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live (non-snapshot) trace.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

/* All trace_arrays: the global one plus any created instances. */
LIST_HEAD(ftrace_trace_arrays);
274
275int trace_array_get(struct trace_array *this_tr)
276{
277 struct trace_array *tr;
278 int ret = -ENODEV;
279
280 mutex_lock(&trace_types_lock);
281 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
282 if (tr == this_tr) {
283 tr->ref++;
284 ret = 0;
285 break;
286 }
287 }
288 mutex_unlock(&trace_types_lock);
289
290 return ret;
291}
292
/* Drop a reference; caller must hold trace_types_lock. */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/* Drop a reference taken by trace_array_get(). */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
305
306int filter_check_discard(struct trace_event_file *file, void *rec,
307 struct ring_buffer *buffer,
308 struct ring_buffer_event *event)
309{
310 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
311 !filter_match_preds(file->filter, rec)) {
312 ring_buffer_discard_commit(buffer, event);
313 return 1;
314 }
315
316 return 0;
317}
318EXPORT_SYMBOL_GPL(filter_check_discard);
319
320int call_filter_check_discard(struct trace_event_call *call, void *rec,
321 struct ring_buffer *buffer,
322 struct ring_buffer_event *event)
323{
324 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
325 !filter_match_preds(call->filter, rec)) {
326 ring_buffer_discard_commit(buffer, event);
327 return 1;
328 }
329
330 return 0;
331}
332EXPORT_SYMBOL_GPL(call_filter_check_discard);
333
/*
 * Current trace-buffer timestamp for @cpu, normalized by the ring
 * buffer.  Falls back to the local trace clock if the buffer has not
 * been allocated yet (early boot).
 */
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
347
/* Current timestamp of the global trace buffer for @cpu. */
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
352
353
354
355
356
357
358
359
360
361
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Uses the mirror flag "buffer_disabled" so fast paths (e.g. the
 * irqsoff tracer) can check cheaply.  It may be inaccurate due to
 * races; use tracing_is_on() for the accurate (slower) state.
 */
int tracing_is_enabled(void)
{
	/*
	 * Pairs with the smp_wmb() in tracer_tracing_on()/off() that
	 * publishes buffer_disabled.  A little racy, by design.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
372
373
374
375
376
377
378
379
380
381
382
/* Default per-CPU ring-buffer size: 1441792 bytes (352 pages of 4 KB). */
#define TRACE_BUF_SIZE_DEFAULT 1441792UL

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of registered tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * Protects the trace_types list, tracer registration, and the
 * current tracer selection.
 */
DEFINE_MUTEX(trace_types_lock);
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
#ifdef CONFIG_SMP
/*
 * Reader protection of the per-cpu trace buffers:
 *
 *  - A reader of one CPU's buffer takes all_cpu_access_lock for read
 *    plus that CPU's cpu_access_lock mutex, so readers of different
 *    CPUs may run concurrently.
 *  - A reader of all CPUs (RING_BUFFER_ALL_CPUS) takes
 *    all_cpu_access_lock for write, excluding every per-cpu reader.
 *
 * The rwsem is always taken before the per-cpu mutex, keeping the
 * lock ordering consistent.
 */
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		/* Release in the reverse order of acquisition. */
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

/* UP: a single mutex serializes all buffer access. */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
476
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
/* Stack tracing compiled out: provide no-op stubs. */
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif
500
/* Re-enable recording into @tr's buffers (see tracing_on()). */
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled.  It can tolerate
	 * races: in the fast paths that read it, speed matters more
	 * than accuracy.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag visible to readers (pairs with smp_rmb()). */
	smp_wmb();
}
517
518
519
520
521
522
523
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off().
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
529
530
531
532
533
534
535
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 *
 * Returns the number of bytes recorded, or 0 if tracing is disabled,
 * printk-tracing is off, or the event could not be reserved.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Room for the string plus a possible '\n' and the '\0'. */
	alloc = sizeof(*entry) + size + 2;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
580
581
582
583
584
585
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
 *
 * Only the pointer to @str is recorded, so the string must have static
 * lifetime (e.g. a literal).  Returns 1 on success, 0 otherwise.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
620
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * Swaps the snapshot buffer with the current live tracing buffer, so a
 * snapshot can be taken when some condition triggers while tracing
 * continues.  The snapshot buffer must have been allocated first (via
 * tracing_snapshot_alloc() or the "snapshot" tracefs file); if it was
 * not, tracing is stopped, effectively making a permanent snapshot.
 * Cannot be used from NMI context or while a latency (max_tr) tracer
 * owns the snapshot buffer.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
667
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

/*
 * Allocate @tr's snapshot (max) buffer, sized to match the live buffer.
 * No-op if it is already allocated.  Returns 0 on success or a negative
 * errno from the resize.
 */
static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
689
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it down to a
	 * single page, because the max_tr ring buffer has state (e.g.
	 * its clock) that we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
702
703
704
705
706
707
708
709
710
711
712
713int tracing_alloc_snapshot(void)
714{
715 struct trace_array *tr = &global_trace;
716 int ret;
717
718 ret = alloc_snapshot(tr);
719 WARN_ON(ret < 0);
720
721 return ret;
722}
723EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
724
725
726
727
728
729
730
731
732
733
734
735
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * Convenience wrapper: ensures the snapshot buffer is allocated, then
 * takes the snapshot.  If allocation fails the snapshot is skipped
 * (tracing_alloc_snapshot() already warns).
 */
void tracing_snapshot_alloc(void)
{
	if (tracing_alloc_snapshot() >= 0)
		tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/* CONFIG_TRACER_SNAPSHOT not set: stubs that warn if ever used. */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give a warning that snapshots can't be used */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif
766
/* Stop recording into @tr's buffers (see tracing_off()). */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled.  It can tolerate
	 * races: in the fast paths that read it, speed matters more
	 * than accuracy.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag visible to readers (pairs with smp_rmb()). */
	smp_wmb();
}
783
784
785
786
787
788
789
790
791
/**
 * tracing_off - turn off tracing buffers
 *
 * Stops the tracing buffers from recording data.  It does not disable
 * any overhead the tracers themselves may be causing; it simply makes
 * all recording to the ring buffers fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

/* Turn tracing off when "traceoff_on_warning" is set and a WARN fires. */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
803
804
805
806
807
808
809
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows the real (accurate) state of @tr's ring buffer.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	/* No buffer yet: fall back to the mirror flag. */
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
825
826static int __init set_buf_size(char *str)
827{
828 unsigned long buf_size;
829
830 if (!str)
831 return 0;
832 buf_size = memparse(str, &str);
833
834 if (buf_size == 0)
835 return 0;
836 trace_buf_size = buf_size;
837 return 1;
838}
839__setup("trace_buf_size=", set_buf_size);
840
841static int __init set_tracing_thresh(char *str)
842{
843 unsigned long threshold;
844 int ret;
845
846 if (!str)
847 return 0;
848 ret = kstrtoul(str, 0, &threshold);
849 if (ret < 0)
850 return 0;
851 tracing_thresh = threshold * 1000;
852 return 1;
853}
854__setup("tracing_thresh=", set_tracing_thresh);
855
/* Convert nanoseconds to microseconds, truncating toward zero. */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000;

	return usecs;
}
860
861
862
863
864
865
866
/*
 * TRACE_FLAGS expands C(enum_name, "string") pairs; redefining C() to
 * the string turns it into the table of trace option names.  These
 * must match the bit positions in trace_iterator_flags.
 */
#undef C
#define C(a, b) b

static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

/*
 * Available trace clocks, selectable via the "trace_clock" file.
 * in_ns marks clocks whose values are in nanoseconds (so they can be
 * converted for display).
 */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;
} trace_clocks[] = {
	{ trace_clock_local, "local", 1 },
	{ trace_clock_global, "global", 1 },
	{ trace_clock_counter, "counter", 0 },
	{ trace_clock_jiffies, "uptime", 0 },
	{ trace_clock, "perf", 1 },
	{ ktime_get_mono_fast_ns, "mono", 1 },
	{ ktime_get_raw_fast_ns, "mono_raw", 1 },
	ARCH_TRACE_CLOCKS
};
890
891
892
893
/*
 * trace_parser_get_init - allocate and initialize the parser's buffer.
 * Returns 0 on success, 1 if the buffer allocation failed.
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
913
914
915
916
917
918
919
920
921
922
923
924
/*
 * trace_get_user - reads one whitespace-separated token of user input
 * into parser->buffer.
 *
 * A token that does not fit in one read sets parser->cont so the next
 * call continues it without skipping leading spaces.  A token longer
 * than parser->size - 1 yields -EINVAL.
 *
 * Returns the number of bytes consumed from @ubuf, or a negative errno.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* Start of the stream: forget any previous partial token. */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser hasn't finished with the last write, continue
	 * reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* Token terminated by space: finish it; otherwise mark continuation. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
999
1000
1001static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1002{
1003 int len;
1004
1005 if (trace_seq_used(s) <= s->seq.readpos)
1006 return -EBUSY;
1007
1008 len = trace_seq_used(s) - s->seq.readpos;
1009 if (cnt > len)
1010 cnt = len;
1011 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1012
1013 s->seq.readpos += cnt;
1014 return cnt;
1015}
1016
/*
 * Latency threshold in nanoseconds; when nonzero, latency tracers only
 * record traces whose latency exceeds it (set via "tracing_thresh").
 */
unsigned long __read_mostly tracing_thresh;
1018
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the bookkeeping of the new maximum trace into the separate
 * maximum-trace structure, so the maximum trace is permanently saved
 * along with the task that caused it.  Caller holds tr->max_lock and
 * has already swapped the buffers.
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not
	 * use RCU.  The irq tracer can be called in an RCU-unsafe area.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flips the live and max buffers of @tr and records information about
 * the task that caused the maximum latency.  Caller must have
 * interrupts disabled.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Swap the live and max buffers. */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1093
1094
1095
1096
1097
1098
1099
1100
1101
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flips a single CPU's buffer between @tr and max_tr.  Caller must
 * have interrupts disabled.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU.  We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1137#endif
1138
/*
 * Block until data (or, if @full, a full page) is available in the
 * trace buffer for iter->cpu_file.  Snapshot iterators never block.
 */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
1148
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run @type's self-test against the global trace array, restoring the
 * previous tracer afterwards.  Returns 0 on pass (or when self-tests
 * are disabled), -1 on failure.  Called with trace_types_lock held.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Reset the trace buffer and install this tracer so it can run
	 * internal tracing to verify everything is in order.  If it
	 * fails, the tracer will not be registered.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Self-tests compiled out: always "pass". */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif
1214
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Runs the tracer's self-test (unless disabled), links it into
 * trace_types, and — if it is the tracer requested on the command
 * line via "ftrace=" — makes it the current tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate registrations. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	/* Only switch now if this is the tracer asked for via "ftrace=". */
	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1307
/* Reset a single CPU's buffer, quiescing writers first. */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1323
/* Reset every online CPU's buffer and restamp the buffer's start time. */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1344
1345
/*
 * Reset the buffers of every trace_array instance (live and, when
 * configured, max/snapshot buffers).
 * NOTE(review): walks ftrace_trace_arrays without taking
 * trace_types_lock here - presumably callers serialize against
 * instance creation/removal; confirm.
 */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1357
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Fixed-size cache mapping pids to task comms so trace output can show
 * task names.  map_pid_to_cmdline maps a pid to a slot index;
 * map_cmdline_to_pid maps a slot back to its owning pid;
 * saved_cmdlines holds cmdline_num slots of TASK_COMM_LEN bytes each.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

/* Address of saved-cmdline slot @idx. */
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

/* Store @cmdline (TASK_COMM_LEN bytes) into slot @idx. */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1382
/*
 * Allocate the dynamic parts of @s sized for @val cmdline slots.
 * All map entries start as NO_CMDLINE_MAP; initializing them with
 * memset works because NO_CMDLINE_MAP is UINT_MAX (every byte 0xff).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
1406
1407static int trace_create_savedcmd(void)
1408{
1409 int ret;
1410
1411 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1412 if (!savedcmd)
1413 return -ENOMEM;
1414
1415 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1416 if (ret < 0) {
1417 kfree(savedcmd);
1418 savedcmd = NULL;
1419 return -ENOMEM;
1420 }
1421
1422 return 0;
1423}
1424
/* Non-zero while the global trace is stopped via tracing_stop(). */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1429
1430
1431
1432
1433
1434
1435
/**
 * tracing_start - quickly start the tracer (just the global buffers)
 *
 * If tracing is enabled but was stopped by tracing_stop(), this starts
 * the tracing back up.  Start/stop calls nest via stop_count.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from being swapped while we enable them. */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1472
/* Per-instance variant of tracing_start(). */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1503
1504
1505
1506
1507
1508
1509
/**
 * tracing_stop - quickly stop the tracer
 *
 * Light weight way to stop tracing.  Use in conjunction with
 * tracing_start().  Calls nest via stop_count.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from being swapped while we disable them. */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1537
/* Per-instance variant of tracing_stop(). */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1558
void trace_stop_cmdline_recording(void);

/*
 * Remember tsk->pid <-> tsk->comm in the saved-cmdlines cache so trace
 * output can show task names for recorded pids.
 * Returns 1 if the comm was saved, 0 if it had to be skipped.
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* Pid 0 (idle) and out-of-range pids are not cached. */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get the lock, but
	 * we also don't want to spin nor disable interrupts, so if we
	 * miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* Claim the next slot in round-robin order. */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped.  We are going to overwrite that entry, so we
		 * must clear its map_pid_to_cmdline, otherwise we would
		 * read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1603
1604static void __trace_find_cmdline(int pid, char comm[])
1605{
1606 unsigned map;
1607
1608 if (!pid) {
1609 strcpy(comm, "<idle>");
1610 return;
1611 }
1612
1613 if (WARN_ON_ONCE(pid < 0)) {
1614 strcpy(comm, "<XXX>");
1615 return;
1616 }
1617
1618 if (pid > PID_MAX_DEFAULT) {
1619 strcpy(comm, "<...>");
1620 return;
1621 }
1622
1623 map = savedcmd->map_pid_to_cmdline[pid];
1624 if (map != NO_CMDLINE_MAP)
1625 strcpy(comm, get_saved_cmdlines(map));
1626 else
1627 strcpy(comm, "<...>");
1628}
1629
/*
 * Look up the saved comm for @pid and copy it into @comm.
 *
 * Takes trace_cmdline_lock with preemption disabled so the lookup
 * cannot race with trace_save_cmdline() rewriting the map.
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1640
1641void tracing_record_cmdline(struct task_struct *tsk)
1642{
1643 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1644 return;
1645
1646 if (!__this_cpu_read(trace_cmdline_save))
1647 return;
1648
1649 if (trace_save_cmdline(tsk))
1650 __this_cpu_write(trace_cmdline_save, false);
1651}
1652
/*
 * Fill in the fields common to every trace entry from the current
 * context: pid, preempt count, and irq/softirq/NMI/need-resched flags.
 *
 * @flags: irq flags as saved by local_save_flags()
 * @pc:    preempt_count() at the time of the event
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1674
1675struct ring_buffer_event *
1676trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677 int type,
1678 unsigned long len,
1679 unsigned long flags, int pc)
1680{
1681 struct ring_buffer_event *event;
1682
1683 event = ring_buffer_lock_reserve(buffer, len);
1684 if (event != NULL) {
1685 struct trace_entry *ent = ring_buffer_event_data(event);
1686
1687 tracing_generic_entry_update(ent, flags, pc);
1688 ent->type = type;
1689 }
1690
1691 return event;
1692}
1693
/*
 * Commit a reserved event and mark this CPU as having fresh data, so
 * the next tracing_record_cmdline() call saves the current comm.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1700
/*
 * Commit @event and, when the corresponding options are enabled,
 * follow it with kernel and user stack-trace entries taken at the
 * same @flags/@pc context.
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	/* skip=6: presumably hides the tracing call frames — TODO confirm */
	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1712
1713static struct ring_buffer *temp_buffer;
1714
/*
 * Reserve an event on the ring buffer belonging to @trace_file's
 * trace array.  *current_rb is set to the buffer actually used so
 * the caller commits to the right one.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * Reserving on the real buffer can fail (e.g. recording is
	 * disabled).  If the event has a conditional trigger attached,
	 * still build the event in temp_buffer so the trigger can
	 * inspect its data; presumably temp_buffer is never shown as
	 * trace output — confirm against temp_buffer's setup.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1740
/*
 * Reserve an event on the global trace array's buffer.  *current_rb
 * is set to that buffer for the subsequent commit.
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1751
/*
 * Like trace_buffer_unlock_commit(), but the kernel stack trace is
 * taken from @regs (when non-NULL) instead of the current stack, and
 * no frames are skipped.
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1764
/* Throw away a reserved event instead of committing it. */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1771
/*
 * Record a TRACE_FN entry for function @ip called from @parent_ip
 * into @tr's buffer, unless the event filter discards it.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* only commit when the filter keeps the entry */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1793
1794#ifdef CONFIG_STACKTRACE
1795
/* per-cpu scratch area: one page worth of return addresses */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* nesting depth of __ftrace_trace_stack() on this CPU */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
/*
 * Record a TRACE_STACK entry on @buffer.  The stack is taken from
 * @regs when non-NULL, otherwise from the current context, skipping
 * @skip frames.
 *
 * The per-cpu ftrace_stack scratch area lets the outermost caller
 * size the event to the real stack depth; nested callers (this can
 * run from NMIs) fall back to a fixed-size capture directly into the
 * reserved event.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to use
	 * the per-cpu scratch stack unless we guard against nesting:
	 * bump the reserve counter; only the first (outermost) level
	 * may use it.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);

	/*
	 * Compiler barrier: keep the counter update ordered before the
	 * stack capture below so a nested entry on this CPU observes
	 * the reservation.
	 */
	barrier();
	if (use_stack == 1) {
		/* outermost level: capture into the per-cpu scratch stack */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		/* grow the event beyond the default if the stack is deeper */
		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* nested: capture directly into the reserved event below */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* release the reservation only after all use of the scratch stack */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1884
1885static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
1887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
1889{
1890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891 return;
1892
1893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894}
1895
/* Unconditionally record the current kernel stack into @tr's buffer. */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1901
1902
1903
1904
1905
/**
 * trace_dump_stack - record the current stack trace into the global buffer
 * @skip: number of extra caller frames to omit from the dump
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * +3 presumably hides this function and the capture helpers so
	 * the dump starts at our caller — TODO confirm frame count.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
1923
1924static DEFINE_PER_CPU(int, user_stack_count);
1925
/*
 * Record a TRACE_USER_STACK entry (the current task's user-space call
 * chain) on @buffer, when TRACE_ITER_USERSTACKTRACE is enabled.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * Bail out in NMI context: capturing a user stack may fault,
	 * which is presumably unsafe from an NMI — hence the check.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * The per-cpu user_stack_count guards against recursion:
	 * capturing the user stack may itself generate events that
	 * would land back here.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1977
1978#ifdef UNUSED
/*
 * Dead code: compiled out via the surrounding #ifdef UNUSED.
 * NOTE(review): passes a trace_array where ftrace_trace_userstack()
 * takes a ring_buffer — this would not compile if ever enabled.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
1983#endif
1984
1985#endif
1986
1987
/* scratch space used to format one trace_printk() message */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/*
 * One per-cpu scratch buffer per context level, so a softirq, irq or
 * NMI interrupting a lower level never clobbers its buffer (see
 * get_trace_buf() and alloc_percpu_trace_buffer()).
 */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997
1998
1999
2000
2001
2002
2003
2004static char *get_trace_buf(void)
2005{
2006 struct trace_buffer_struct *percpu_buffer;
2007
2008
2009
2010
2011
2012 if (in_nmi())
2013 percpu_buffer = trace_percpu_nmi_buffer;
2014 else if (in_irq())
2015 percpu_buffer = trace_percpu_irq_buffer;
2016 else if (in_softirq())
2017 percpu_buffer = trace_percpu_sirq_buffer;
2018 else
2019 percpu_buffer = trace_percpu_buffer;
2020
2021 if (!percpu_buffer)
2022 return NULL;
2023
2024 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025}
2026
/*
 * Allocate the four per-cpu trace_printk() scratch buffers (one per
 * context level).  All-or-nothing: on any failure the already
 * allocated buffers are freed via the goto ladder and -ENOMEM is
 * returned (with a warning).  Returns 0 on success.
 */
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	/* publish only after every allocation has succeeded */
	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
2067
2068static int buffers_allocated;
2069
/*
 * One-time setup for trace_printk(): allocate the per-cpu scratch
 * buffers, print a loud notice that trace_printk() is in use (it
 * should never ship in production code), expand the ring buffers and
 * start cmdline recording.  Subsequent calls are no-ops.
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * If the main buffer is not yet allocated, defer starting
	 * cmdline recording; presumably the buffer-allocation path
	 * starts it once the buffer exists — TODO confirm caller.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2109
2110void trace_printk_start_comm(void)
2111{
2112
2113 if (!buffers_allocated)
2114 return;
2115 tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120 if (!buffers_allocated)
2121 return;
2122
2123 if (enabled)
2124 tracing_start_cmdline_record();
2125 else
2126 tracing_stop_cmdline_record();
2127}
2128
2129
2130
2131
2132
/**
 * trace_vbprintk - write binary trace_printk data to the global buffer
 * @ip:   call site of the trace_printk()
 * @fmt:  format string (stored by reference in the entry)
 * @args: arguments, binary-encoded via vbin_printf()
 *
 * Returns the number of u32 words encoded, or 0 on failure.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* bail on encoding error or scratch-buffer overflow */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2188
/*
 * Format a trace_printk() string into a TRACE_PRINT entry on @buffer.
 * The text (NUL included) is stored inline in the entry.
 *
 * Returns the formatted length, or 0 on failure.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;	/* +1 for the trailing NUL */
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
2238
/* trace_printk() into @tr's ring buffer. Returns the formatted length. */
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}
2244
2245int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
2251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2262{
2263 int ret;
2264 va_list ap;
2265
2266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267 return 0;
2268
2269 va_start(ap, fmt);
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271 va_end(ap);
2272 return ret;
2273}
2274
/* trace_printk() into the global trace array. */
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
2280
/*
 * Advance the iterator's logical index, and move the per-cpu ring
 * buffer iterator (if one exists for iter->cpu) past the current
 * event.
 */
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}
2289
2290static struct trace_entry *
2291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
2293{
2294 struct ring_buffer_event *event;
2295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296
2297 if (buf_iter)
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2299 else
2300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301 lost_events);
2302
2303 if (event) {
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2306 }
2307 iter->ent_size = 0;
2308 return NULL;
2309}
2310
/*
 * Find the entry with the oldest timestamp across the CPUs covered by
 * the iterator (or just iter->cpu_file when pinned to a single CPU).
 *
 * The optional outputs *ent_cpu, *ent_ts and *missing_events describe
 * the chosen entry; iter->ent_size is set to its length.  Returns
 * NULL when every buffer is empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother scanning
	 * the other CPUs.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	/* peek_next_entry() clobbered ent_size per CPU; restore the winner's */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2370
2371
/* Find the next (oldest) entry without consuming it. */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
2377
2378
2379void *trace_find_next_entry_inc(struct trace_iterator *iter)
2380{
2381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
2383
2384 if (iter->ent)
2385 trace_iterator_increment(iter);
2386
2387 return iter->ent ? iter : NULL;
2388}
2389
/* Consume (remove) the current event on iter->cpu from the live buffer. */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
2395
/*
 * seq_file ->next: advance the trace iterator to position *pos by
 * consuming entries with trace_find_next_entry_inc() until iter->idx
 * catches up.  Returns the iterator as the cursor, or NULL at EOF.
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	/* idx < 0 means nothing has been read yet; prime the iterator */
	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2422
/*
 * Rewind the per-cpu buffer iterator for @cpu and skip past any
 * events older than the buffer's start time, counting them into
 * skipped_entries (used by get_total_entries() to keep the
 * entries-written accounting consistent).
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * Events whose timestamp precedes time_start belong to a run
	 * before the last reset took effect on this cpu and must not
	 * be shown; skip and count them.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2452
2453
2454
2455
2456
/*
 * seq_file ->start: position the trace iterator at *pos.
 *
 * If *pos matches where we left off, continue from there; otherwise
 * reset every covered cpu iterator and replay entries from the start
 * until *pos is reached.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * iter->trace is a copy; if the current tracer changed since
	 * the file was opened, refresh the copy under trace_types_lock.
	 * The comparison is by name because iter->trace is a copy, not
	 * the tracer object itself.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* reading a live buffer: pause cmdline recording while we walk it */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* rewind: reset the per-cpu iterators and replay to *pos */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * leftover set means the previous entry did not fully
		 * fit in the seq_file buffer; hand the iterator back
		 * unchanged so that entry is retried.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2517
/*
 * seq_file ->stop: undo s_start() — drop the access locks and
 * re-enable cmdline recording (unless reading a snapshot).
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* s_start() bailed early with -EBUSY; nothing to undo */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2533
2534static void
2535get_total_entries(struct trace_buffer *buf,
2536 unsigned long *total, unsigned long *entries)
2537{
2538 unsigned long count;
2539 int cpu;
2540
2541 *total = 0;
2542 *entries = 0;
2543
2544 for_each_tracing_cpu(cpu) {
2545 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2546
2547
2548
2549
2550
2551 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2553
2554 *total += count;
2555 } else
2556 *total += count +
2557 ring_buffer_overrun_cpu(buf->buffer, cpu);
2558 *entries += count;
2559 }
2560}
2561
/* Column legend for the latency-format trace output. */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}
2573
/* Print the entries-in-buffer / entries-written summary header line. */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2584
/* Column legend for the default (no irq-info) trace output. */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}
2591
/* Column legend for trace output with irq-info columns enabled. */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}
2603
2604void
2605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
2607 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2608 struct trace_buffer *buf = iter->trace_buffer;
2609 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2610 struct tracer *type = iter->trace;
2611 unsigned long entries;
2612 unsigned long total;
2613 const char *name = "preemption";
2614
2615 name = type->name;
2616
2617 get_total_entries(buf, &total, &entries);
2618
2619 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2620 name, UTS_RELEASE);
2621 seq_puts(m, "# -----------------------------------"
2622 "---------------------------------\n");
2623 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2624 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2625 nsecs_to_usecs(data->saved_latency),
2626 entries,
2627 total,
2628 buf->cpu,
2629#if defined(CONFIG_PREEMPT_NONE)
2630 "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632 "desktop",
2633#elif defined(CONFIG_PREEMPT)
2634 "preempt",
2635#else
2636 "unknown",
2637#endif
2638
2639 0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643 seq_puts(m, ")\n");
2644#endif
2645 seq_puts(m, "# -----------------\n");
2646 seq_printf(m, "# | task: %.16s-%d "
2647 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2648 data->comm, data->pid,
2649 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2650 data->policy, data->rt_priority);
2651 seq_puts(m, "# -----------------\n");
2652
2653 if (data->critical_start) {
2654 seq_puts(m, "# => started at: ");
2655 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656 trace_print_seq(m, &iter->seq);
2657 seq_puts(m, "\n# => ended at: ");
2658 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659 trace_print_seq(m, &iter->seq);
2660 seq_puts(m, "\n#\n");
2661 }
2662
2663 seq_puts(m, "#\n");
2664}
2665
/*
 * Emit a "CPU n buffer started" annotation the first time output from
 * a given cpu appears, so readers can tell where that cpu's data
 * begins in interleaved output.  Gated on both the array's ANNOTATE
 * option and the iterator's per-file flag.
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	/* already announced this cpu */
	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	/* cpu had stale entries skipped; suppress the annotation */
	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print "buffer started" for the very first entry */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
2691
/*
 * Default human-readable formatting of the current entry: print the
 * context columns, then dispatch to the entry type's registered trace
 * output handler.
 */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	/* no handler registered for this entry type */
	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
2723
/*
 * "raw" output format: pid, cpu and timestamp as plain numbers,
 * followed by the entry type's raw handler output.
 */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	/* unknown entry type */
	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
2748
/*
 * "hex" output format: context fields and the entry emitted as hex
 * dumps, one record per line.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
2778
2779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2780{
2781 struct trace_array *tr = iter->tr;
2782 struct trace_seq *s = &iter->seq;
2783 struct trace_entry *entry;
2784 struct trace_event *event;
2785
2786 entry = iter->ent;
2787
2788 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2789 SEQ_PUT_FIELD(s, entry->pid);
2790 SEQ_PUT_FIELD(s, iter->cpu);
2791 SEQ_PUT_FIELD(s, iter->ts);
2792 if (trace_seq_has_overflowed(s))
2793 return TRACE_TYPE_PARTIAL_LINE;
2794 }
2795
2796 event = ftrace_find_event(entry->type);
2797 return event ? event->funcs->binary(iter, 0, event) :
2798 TRACE_TYPE_HANDLED;
2799}
2800
/*
 * Return 1 when there is nothing left to read on the CPUs covered by
 * the iterator, 0 otherwise.  Each cpu is checked through its buffer
 * iterator when one exists, otherwise against the live ring buffer.
 */
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
2833
2834
/*
 * Format the iterator's current entry into iter->seq.
 *
 * Dispatch order: lost-event notice, the tracer's own print_line hook,
 * message-only trace_printk variants, then the bin/hex/raw/default
 * formats selected by the trace options.
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* the tracer gets first crack; UNHANDLED falls through to us */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2880
2881void trace_latency_header(struct seq_file *m)
2882{
2883 struct trace_iterator *iter = m->private;
2884 struct trace_array *tr = iter->tr;
2885
2886
2887 if (trace_empty(iter))
2888 return;
2889
2890 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891 print_trace_header(m, iter);
2892
2893 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2894 print_lat_help_header(m);
2895}
2896
2897void trace_default_header(struct seq_file *m)
2898{
2899 struct trace_iterator *iter = m->private;
2900 struct trace_array *tr = iter->tr;
2901 unsigned long trace_flags = tr->trace_flags;
2902
2903 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904 return;
2905
2906 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907
2908 if (trace_empty(iter))
2909 return;
2910 print_trace_header(m, iter);
2911 if (!(trace_flags & TRACE_ITER_VERBOSE))
2912 print_lat_help_header(m);
2913 } else {
2914 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915 if (trace_flags & TRACE_ITER_IRQ_INFO)
2916 print_func_help_header_irq(iter->trace_buffer, m);
2917 else
2918 print_func_help_header(iter->trace_buffer, m);
2919 }
2920 }
2921}
2922
/* Warn in the output when the function tracer has shut itself down. */
static void test_ftrace_alive(struct seq_file *m)
{
	if (ftrace_is_dead())
		seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
			    "# MAY BE MISSING FUNCTION EVENTS\n");
}
2930
2931#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the top-level (all-CPUs) snapshot file. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}
2941
/* Usage text for a per-CPU snapshot file; swap support changes what '1' does. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}
2956
2957static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958{
2959 if (iter->tr->allocated_snapshot)
2960 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2961 else
2962 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2963
2964 seq_puts(m, "# Snapshot commands:\n");
2965 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966 show_snapshot_main_help(m);
2967 else
2968 show_snapshot_percpu_help(m);
2969}
2970#else
2971
/* CONFIG_TRACER_MAX_TRACE=n: snapshot help is a no-op. */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973#endif
2974
/*
 * seq_file ->show for the "trace" file: emit either the header block
 * (iter->ent == NULL), a line left over from a previous seq_file
 * overflow, or the next formatted trace line.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3018
3019
3020
3021
3022
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025 if (inode->i_cdev)
3026 return (long)inode->i_cdev - 1;
3027 return RING_BUFFER_ALL_CPUS;
3028}
3029
/* seq_file operations backing the "trace" file iterator. */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
3036
/*
 * Build the trace_iterator for a read of the "trace" file bound to
 * @inode's trace_array.  When @snapshot is true, the iterator reads the
 * max/snapshot buffer instead of the live one.  Returns an ERR_PTR on
 * allocation failure or when tracing is permanently disabled.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a private copy of the current tracer so that a tracer
	 * switch while we are reading does not pull it out from under us.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Latency tracers and snapshot reads use the max buffer. */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Stop the trace while dumping if we are not opening "snapshot". */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3131
3132int tracing_open_generic(struct inode *inode, struct file *filp)
3133{
3134 if (tracing_disabled)
3135 return -ENODEV;
3136
3137 filp->private_data = inode->i_private;
3138 return 0;
3139}
3140
3141bool tracing_is_disabled(void)
3142{
3143 return (tracing_disabled) ? true: false;
3144}
3145
3146
3147
3148
3149
3150static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3151{
3152 struct trace_array *tr = inode->i_private;
3153
3154 if (tracing_disabled)
3155 return -ENODEV;
3156
3157 if (trace_array_get(tr) < 0)
3158 return -ENODEV;
3159
3160 filp->private_data = inode->i_private;
3161
3162 return 0;
3163}
3164
/*
 * Release for the "trace" file: tear down the iterator built by
 * __tracing_open() and drop the trace_array reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		/* Writes do not use seq_file; nothing else to clean up. */
		trace_array_put(tr);
		return 0;
	}

	/* Readers have a full iterator hanging off the seq_file. */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3205
3206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208 struct trace_array *tr = inode->i_private;
3209
3210 trace_array_put(tr);
3211 return 0;
3212}
3213
3214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216 struct trace_array *tr = inode->i_private;
3217
3218 trace_array_put(tr);
3219
3220 return single_release(inode, file);
3221}
3222
/*
 * Open the "trace" file.  Opening with O_TRUNC for write erases the
 * buffer contents; opening for read builds a full iterator via
 * __tracing_open().  Holds a trace_array reference on success.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* On failure, drop the reference taken above. */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3255
3256
3257
3258
3259
3260
3261static bool
3262trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263{
3264 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265}
3266
3267
3268static struct tracer *
3269get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270{
3271 while (t && !trace_ok_for_array(t, tr))
3272 t = t->next;
3273
3274 return t;
3275}
3276
3277static void *
3278t_next(struct seq_file *m, void *v, loff_t *pos)
3279{
3280 struct trace_array *tr = m->private;
3281 struct tracer *t = v;
3282
3283 (*pos)++;
3284
3285 if (t)
3286 t = get_tracer_for_array(tr, t->next);
3287
3288 return t;
3289}
3290
/*
 * seq_file ->start for available_tracers: takes trace_types_lock
 * (released in t_stop()) and walks to the entry at *pos.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}
3305
/* seq_file ->stop: drops the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3310
3311static int t_show(struct seq_file *m, void *v)
3312{
3313 struct tracer *t = v;
3314
3315 if (!t)
3316 return 0;
3317
3318 seq_puts(m, t->name);
3319 if (t->next)
3320 seq_putc(m, ' ');
3321 else
3322 seq_putc(m, '\n');
3323
3324 return 0;
3325}
3326
/* seq_file operations for the available_tracers file. */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3333
3334static int show_traces_open(struct inode *inode, struct file *file)
3335{
3336 struct trace_array *tr = inode->i_private;
3337 struct seq_file *m;
3338 int ret;
3339
3340 if (tracing_disabled)
3341 return -ENODEV;
3342
3343 ret = seq_open(file, &show_traces_seq_ops);
3344 if (ret)
3345 return ret;
3346
3347 m = file->private_data;
3348 m->private = tr;
3349
3350 return 0;
3351}
3352
/*
 * Accept and discard writes to the "trace" file; clearing is done via
 * O_TRUNC at open time, so the written data itself is meaningless.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3359
3360loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3361{
3362 int ret;
3363
3364 if (file->f_mode & FMODE_READ)
3365 ret = seq_lseek(file, offset, whence);
3366 else
3367 file->f_pos = ret = 0;
3368
3369 return ret;
3370}
3371
/* File operations for the "trace" file. */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3379
/* File operations for available_tracers. */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3386
3387
3388
3389
3390
/* Serializes updates of tracing_cpumask and access to mask_str below. */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary buffer for formatting the cpumask for user space;
 * protected by tracing_cpumask_update_lock.
 */
static char mask_str[NR_CPUS + 1];
3398
3399static ssize_t
3400tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
3403 struct trace_array *tr = file_inode(filp)->i_private;
3404 int len;
3405
3406 mutex_lock(&tracing_cpumask_update_lock);
3407
3408 len = snprintf(mask_str, count, "%*pb\n",
3409 cpumask_pr_args(tr->tracing_cpumask));
3410 if (len >= count) {
3411 count = -EINVAL;
3412 goto out_err;
3413 }
3414 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416out_err:
3417 mutex_unlock(&tracing_cpumask_update_lock);
3418
3419 return count;
3420}
3421
/*
 * Write a new tracing cpumask.  CPUs leaving the mask get their
 * per-cpu "disabled" count bumped and ring-buffer recording stopped;
 * CPUs joining get the reverse.  The max_lock/irq-off section keeps
 * the flip atomic with respect to latency-tracer max updates.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3472
/* File operations for tracing_cpumask. */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3480
3481static int tracing_trace_options_show(struct seq_file *m, void *v)
3482{
3483 struct tracer_opt *trace_opts;
3484 struct trace_array *tr = m->private;
3485 u32 tracer_flags;
3486 int i;
3487
3488 mutex_lock(&trace_types_lock);
3489 tracer_flags = tr->current_trace->flags->val;
3490 trace_opts = tr->current_trace->flags->opts;
3491
3492 for (i = 0; trace_options[i]; i++) {
3493 if (tr->trace_flags & (1 << i))
3494 seq_printf(m, "%s\n", trace_options[i]);
3495 else
3496 seq_printf(m, "no%s\n", trace_options[i]);
3497 }
3498
3499 for (i = 0; trace_opts[i].name; i++) {
3500 if (tracer_flags & trace_opts[i].bit)
3501 seq_printf(m, "%s\n", trace_opts[i].name);
3502 else
3503 seq_printf(m, "no%s\n", trace_opts[i].name);
3504 }
3505 mutex_unlock(&trace_types_lock);
3506
3507 return 0;
3508}
3509
3510static int __set_tracer_option(struct trace_array *tr,
3511 struct tracer_flags *tracer_flags,
3512 struct tracer_opt *opts, int neg)
3513{
3514 struct tracer *trace = tracer_flags->trace;
3515 int ret;
3516
3517 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518 if (ret)
3519 return ret;
3520
3521 if (neg)
3522 tracer_flags->val &= ~opts->bit;
3523 else
3524 tracer_flags->val |= opts->bit;
3525 return 0;
3526}
3527
3528
3529static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3530{
3531 struct tracer *trace = tr->current_trace;
3532 struct tracer_flags *tracer_flags = trace->flags;
3533 struct tracer_opt *opts = NULL;
3534 int i;
3535
3536 for (i = 0; tracer_flags->opts[i].name; i++) {
3537 opts = &tracer_flags->opts[i];
3538
3539 if (strcmp(cmp, opts->name) == 0)
3540 return __set_tracer_option(tr, trace->flags, opts, neg);
3541 }
3542
3543 return -EINVAL;
3544}
3545
3546
3547int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548{
3549 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550 return -1;
3551
3552 return 0;
3553}
3554
/*
 * Set or clear one global trace flag on @tr, with side effects for the
 * flags that need them (cmdline recording, buffer overwrite mode,
 * trace_printk).  Returns 0 on success or if nothing changed.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3588
/*
 * Parse one option token ("opt" or "noopt") and apply it, first as a
 * global flag, then as a tracer-private option.  @option may carry
 * surrounding whitespace; strstrip()'s NUL is undone afterwards so the
 * caller's buffer is left intact (apply_trace_boot_options relies on
 * re-walking it).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace was replaced with '\0' by
	 * strstrip, turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
3628
/*
 * Apply the comma-separated option list captured from the boot command
 * line.  strsep() NUL-terminates each token in place; the comma is put
 * back afterwards so the buffer can be walked again later.
 */
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
3648
3649static ssize_t
3650tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651 size_t cnt, loff_t *ppos)
3652{
3653 struct seq_file *m = filp->private_data;
3654 struct trace_array *tr = m->private;
3655 char buf[64];
3656 int ret;
3657
3658 if (cnt >= sizeof(buf))
3659 return -EINVAL;
3660
3661 if (copy_from_user(&buf, ubuf, cnt))
3662 return -EFAULT;
3663
3664 buf[cnt] = 0;
3665
3666 ret = trace_set_options(tr, buf);
3667 if (ret < 0)
3668 return ret;
3669
3670 *ppos += cnt;
3671
3672 return cnt;
3673}
3674
3675static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676{
3677 struct trace_array *tr = inode->i_private;
3678 int ret;
3679
3680 if (tracing_disabled)
3681 return -ENODEV;
3682
3683 if (trace_array_get(tr) < 0)
3684 return -ENODEV;
3685
3686 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687 if (ret < 0)
3688 trace_array_put(tr);
3689
3690 return ret;
3691}
3692
/* File operations for trace_options. */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3700
/*
 * Contents of the "README" tracefs file.  This is user-visible output:
 * the text must stay byte-identical; the #ifdef blocks trim sections
 * whose features are compiled out.
 */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
3829
/* Copy the static README text out to user space. */
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}
3837
/* File operations for the README file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3843
/*
 * Advance to the next used slot of savedcmd->map_cmdline_to_pid[],
 * skipping unset entries; returns NULL at the end of the map.
 */
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	/* Step past the slot just shown, except on the very first call. */
	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}
3863
/*
 * seq_file ->start for saved_cmdlines: takes trace_cmdline_lock with
 * preemption off (both released in saved_cmdlines_stop(), which the
 * seq_file core calls even when we return NULL) and walks to *pos.
 */
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}
3881
/* seq_file ->stop: undo the locking done in saved_cmdlines_start(). */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3887
3888static int saved_cmdlines_show(struct seq_file *m, void *v)
3889{
3890 char buf[TASK_COMM_LEN];
3891 unsigned int *pid = v;
3892
3893 __trace_find_cmdline(*pid, buf);
3894 seq_printf(m, "%d %s\n", *pid, buf);
3895 return 0;
3896}
3897
/* seq_file operations for saved_cmdlines. */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3904
3905static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906{
3907 if (tracing_disabled)
3908 return -ENODEV;
3909
3910 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3911}
3912
/* File operations for saved_cmdlines. */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3919
3920static ssize_t
3921tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
3924 char buf[64];
3925 int r;
3926
3927 arch_spin_lock(&trace_cmdline_lock);
3928 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3929 arch_spin_unlock(&trace_cmdline_lock);
3930
3931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932}
3933
/* Free a saved_cmdlines_buffer together with both backing arrays. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
3940
/*
 * Replace the global saved-cmdlines buffer with a freshly allocated one
 * holding @val entries.  The swap happens under trace_cmdline_lock; the
 * old buffer is freed only after the pointer has been switched.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
3962
/* Resize the saved-cmdlines map from a user-supplied decimal count. */
static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and stay within the pid range */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
3986
/* File operations for saved_cmdlines_size. */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
3992
3993#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * If @ptr sits on a tail sentinel (NULL enum_string), follow the link
 * to the next map array and skip its head item; returns NULL at the
 * end of the chain.
 */
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}
4007
/* seq_file ->next for enum_map: step to the following map entry. */
static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid!  If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	/* May have just stepped onto a tail sentinel; resolve it. */
	ptr = update_enum_map(ptr);

	return ptr;
}
4028
/*
 * seq_file ->start for enum_map: takes trace_enum_mutex (dropped in
 * enum_map_stop()) and walks to entry *pos, skipping the head item of
 * the first map array.
 */
static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}
4046
/* seq_file ->stop: drops the mutex taken in enum_map_start(). */
static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}
4051
4052static int enum_map_show(struct seq_file *m, void *v)
4053{
4054 union trace_enum_map_item *ptr = v;
4055
4056 seq_printf(m, "%s %ld (%s)\n",
4057 ptr->map.enum_string, ptr->map.enum_value,
4058 ptr->map.system);
4059
4060 return 0;
4061}
4062
/* seq_file operations for the enum_map file. */
static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};
4069
4070static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071{
4072 if (tracing_disabled)
4073 return -ENODEV;
4074
4075 return seq_open(filp, &tracing_enum_map_seq_ops);
4076}
4077
/* tracefs "enum_map" file: standard seq_file read interface. */
static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4084
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head: head.length entries + head. */
	return ptr + ptr->head.length + 1;
}
4091
4092static void
4093trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094 int len)
4095{
4096 struct trace_enum_map **stop;
4097 struct trace_enum_map **map;
4098 union trace_enum_map_item *map_array;
4099 union trace_enum_map_item *ptr;
4100
4101 stop = start + len;
4102
4103
4104
4105
4106
4107
4108 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109 if (!map_array) {
4110 pr_warn("Unable to allocate trace enum mapping\n");
4111 return;
4112 }
4113
4114 mutex_lock(&trace_enum_mutex);
4115
4116 if (!trace_enum_maps)
4117 trace_enum_maps = map_array;
4118 else {
4119 ptr = trace_enum_maps;
4120 for (;;) {
4121 ptr = trace_enum_jmp_to_tail(ptr);
4122 if (!ptr->tail.next)
4123 break;
4124 ptr = ptr->tail.next;
4125
4126 }
4127 ptr->tail.next = map_array;
4128 }
4129 map_array->head.mod = mod;
4130 map_array->head.length = len;
4131 map_array++;
4132
4133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134 map_array->map = **map;
4135 map_array++;
4136 }
4137 memset(map_array, 0, sizeof(*map_array));
4138
4139 mutex_unlock(&trace_enum_mutex);
4140}
4141
/* Create the read-only "enum_map" file under @d_tracer. */
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}
4147
#else
/* CONFIG_TRACE_ENUM_MAP_FILE unset: no "enum_map" file, maps not recorded. */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif
4153
/*
 * Hand a module's enum maps to the trace-event core and, when
 * CONFIG_TRACE_ENUM_MAP_FILE is set, record them for the enum_map file.
 */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map = start;

	/* Nothing to do without at least one entry. */
	if (len <= 0)
		return;

	/* Let registered trace events translate the enums ... */
	trace_event_enum_update(map, len);

	/* ... then expose them (no-op if the map file is configured out). */
	trace_insert_enum_map_file(mod, start, len);
}
4168
/* "current_tracer" read: report the active tracer's name plus newline. */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];	/* name + '\n' + NUL */
	int r;

	/* trace_types_lock keeps current_trace stable while we format it. */
	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
4183
/* Reset @tr's main buffer, then run tracer @t's init callback on it. */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
4189
/* Record @val as the per-cpu entry count on every tracing CPU of @buf. */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
4197
4198#ifdef CONFIG_TRACER_MAX_TRACE
4199
/*
 * Resize @trace_buf (all CPUs, or just @cpu_id) to match the per-cpu
 * entry counts recorded in @size_buf.  The recorded 'entries' value is
 * only updated for a CPU after its resize succeeded.  Returns 0 or the
 * first negative errno from ring_buffer_resize(); on failure part-way
 * through the ALL_CPUS loop, earlier CPUs remain resized.
 */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
4224#endif
4225
/*
 * Resize @tr's ring buffer(s) to @size entries (per CPU, or for a
 * single @cpu).  Keeps the max (snapshot) buffer the same size as the
 * main buffer when the current tracer uses it.  Caller must hold
 * trace_types_lock.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Only the global array's max buffer tracks the main buffer's size. */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* Try to shrink the main buffer back to its previous size. */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different size max buffer!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the latency
			 * tracers), it swaps the max buffer with the saved
			 * snapshot. We succeeded to update the size of the
			 * main buffer, but failed to update the size of the
			 * max buffer. And when we tried to reset the main
			 * buffer to the original size, we failed there too.
			 * This is very unlikely to happen, but if it does,
			 * warn and kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4291
4292static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293 unsigned long size, int cpu_id)
4294{
4295 int ret = size;
4296
4297 mutex_lock(&trace_types_lock);
4298
4299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4300
4301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302 ret = -EINVAL;
4303 goto out;
4304 }
4305 }
4306
4307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4308 if (ret < 0)
4309 ret = -ENOMEM;
4310
4311out:
4312 mutex_unlock(&trace_types_lock);
4313
4314 return ret;
4315}
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328int tracing_update_buffers(void)
4329{
4330 int ret = 0;
4331
4332 mutex_lock(&trace_types_lock);
4333 if (!ring_buffer_expanded)
4334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4335 RING_BUFFER_ALL_CPUS);
4336 mutex_unlock(&trace_types_lock);
4337
4338 return ret;
4339}
4340
4341struct trace_option_dentry;
4342
4343static void
4344create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4345
4346
4347
4348
4349
/*
 * Swap @tr over to the nop tracer: disable and reset the current tracer
 * first, then install nop_trace.  No-op if nop is already current.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
4362
4363static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4364{
4365
4366 if (!tr->dir)
4367 return;
4368
4369 create_trace_option_files(tr, t);
4370}
4371
/*
 * Switch @tr to the tracer named @buf: expand the ring buffer on first
 * use, tear down the old tracer, adjust the snapshot buffer to the new
 * tracer's needs, and run its init callback.  Returns 0 or -errno.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4461
/*
 * "current_tracer" write: copy the name from user space, strip trailing
 * whitespace, and switch tracers.  Always consumes the full user count
 * on success, even when the name was truncated to MAX_TRACER_SIZE.
 */
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
4494
4495static ssize_t
4496tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497 size_t cnt, loff_t *ppos)
4498{
4499 char buf[64];
4500 int r;
4501
4502 r = snprintf(buf, sizeof(buf), "%ld\n",
4503 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4504 if (r > sizeof(buf))
4505 r = sizeof(buf);
4506 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4507}
4508
4509static ssize_t
4510tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511 size_t cnt, loff_t *ppos)
4512{
4513 unsigned long val;
4514 int ret;
4515
4516 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517 if (ret)
4518 return ret;
4519
4520 *ptr = val * 1000;
4521
4522 return cnt;
4523}
4524
/* "tracing_thresh" read: report the threshold in usecs. */
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}
4531
/*
 * "tracing_thresh" write: update the threshold, then give the current
 * tracer a chance to react via its update_thresh() callback.
 */
static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4556
4557#ifdef CONFIG_TRACER_MAX_TRACE
4558
/* "tracing_max_latency" read: private_data points at the latency value. */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}
4565
/* "tracing_max_latency" write: set the latency value (usecs from user). */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
4572
4573#endif
4574
4575static int tracing_open_pipe(struct inode *inode, struct file *filp)
4576{
4577 struct trace_array *tr = inode->i_private;
4578 struct trace_iterator *iter;
4579 int ret = 0;
4580
4581 if (tracing_disabled)
4582 return -ENODEV;
4583
4584 if (trace_array_get(tr) < 0)
4585 return -ENODEV;
4586
4587 mutex_lock(&trace_types_lock);
4588
4589
4590 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4591 if (!iter) {
4592 ret = -ENOMEM;
4593 __trace_array_put(tr);
4594 goto out;
4595 }
4596
4597 trace_seq_init(&iter->seq);
4598 iter->trace = tr->current_trace;
4599
4600 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4601 ret = -ENOMEM;
4602 goto fail;
4603 }
4604
4605
4606 cpumask_setall(iter->started);
4607
4608 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4609 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4610
4611
4612 if (trace_clocks[tr->clock_id].in_ns)
4613 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4614
4615 iter->tr = tr;
4616 iter->trace_buffer = &tr->trace_buffer;
4617 iter->cpu_file = tracing_get_cpu(inode);
4618 mutex_init(&iter->mutex);
4619 filp->private_data = iter;
4620
4621 if (iter->trace->pipe_open)
4622 iter->trace->pipe_open(iter);
4623
4624 nonseekable_open(inode, filp);
4625
4626 tr->current_trace->ref++;
4627out:
4628 mutex_unlock(&trace_types_lock);
4629 return ret;
4630
4631fail:
4632 kfree(iter->trace);
4633 kfree(iter);
4634 __trace_array_put(tr);
4635 mutex_unlock(&trace_types_lock);
4636 return ret;
4637}
4638
/* Release handler for "trace_pipe": undo everything tracing_open_pipe() did. */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Drop the pipe-reader count taken at open time. */
	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}
4661
/*
 * Poll helper shared by trace_pipe and per-cpu buffer files.  Static
 * iterators and TRACE_ITER_BLOCK report readable immediately; otherwise
 * defer to the ring buffer's own poll support.
 */
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}
4680
/* poll handler for "trace_pipe". */
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}
4688
4689
/*
 * Block until the pipe has data (or return -EAGAIN for O_NONBLOCK).
 * Returns 1 when data is available, 0/negative to stop reading.
 * Must be called with iter->mutex held; it is dropped around the wait.
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything.  This allows a user to cat this file, and
		 * then enable tracing.  But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
4725
4726
4727
4728
4729static ssize_t
4730tracing_read_pipe(struct file *filp, char __user *ubuf,
4731 size_t cnt, loff_t *ppos)
4732{
4733 struct trace_iterator *iter = filp->private_data;
4734 ssize_t sret;
4735
4736
4737 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4738 if (sret != -EBUSY)
4739 return sret;
4740
4741 trace_seq_init(&iter->seq);
4742
4743
4744
4745
4746
4747
4748 mutex_lock(&iter->mutex);
4749 if (iter->trace->read) {
4750 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4751 if (sret)
4752 goto out;
4753 }
4754
4755waitagain:
4756 sret = tracing_wait_pipe(filp);
4757 if (sret <= 0)
4758 goto out;
4759
4760
4761 if (trace_empty(iter)) {
4762 sret = 0;
4763 goto out;
4764 }
4765
4766 if (cnt >= PAGE_SIZE)
4767 cnt = PAGE_SIZE - 1;
4768
4769
4770 memset(&iter->seq, 0,
4771 sizeof(struct trace_iterator) -
4772 offsetof(struct trace_iterator, seq));
4773 cpumask_clear(iter->started);
4774 iter->pos = -1;
4775
4776 trace_event_read_lock();
4777 trace_access_lock(iter->cpu_file);
4778 while (trace_find_next_entry_inc(iter) != NULL) {
4779 enum print_line_t ret;
4780 int save_len = iter->seq.seq.len;
4781
4782 ret = print_trace_line(iter);
4783 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4784
4785 iter->seq.seq.len = save_len;
4786 break;
4787 }
4788 if (ret != TRACE_TYPE_NO_CONSUME)
4789 trace_consume(iter);
4790
4791 if (trace_seq_used(&iter->seq) >= cnt)
4792 break;
4793
4794
4795
4796
4797
4798
4799 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4800 iter->ent->type);
4801 }
4802 trace_access_unlock(iter->cpu_file);
4803 trace_event_read_unlock();
4804
4805
4806 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4807 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4808 trace_seq_init(&iter->seq);
4809
4810
4811
4812
4813
4814 if (sret == -EBUSY)
4815 goto waitagain;
4816
4817out:
4818 mutex_unlock(&iter->mutex);
4819
4820 return sret;
4821}
4822
/* splice helper: free one page of the splice pipe descriptor. */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}
4828
/* pipe buffer ops for splice from trace_pipe: generic, non-mergeable. */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
4836
/*
 * Fill iter->seq with as many complete trace lines as fit within @rem
 * bytes, consuming entries as they are printed.  Returns the remaining
 * byte budget (0 when the budget or the buffer is exhausted).
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Loop until the seq buffer overflows or rem runs out. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			/* roll back the partially printed line */
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
4883
/*
 * splice_read handler for "trace_pipe": render trace lines into freshly
 * allocated pages and hand them to the pipe without copying to a user
 * buffer first.
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
4970
/*
 * "buffer_size_kb" read: report the per-cpu buffer size in KB.  For the
 * all-CPUs file, print a single size when every CPU agrees, otherwise
 * "X"; an "(expanded: N)" suffix shows the future size while the buffer
 * is still at its boot-time minimum.
 */
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;	/* NOTE: inner 'cpu' shadows the outer one */
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}
5018
/* "buffer_size_kb" write: resize the ring buffer (value given in KB). */
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
5046
/* "buffer_total_size_kb" read: sum of all per-cpu buffer sizes in KB. */
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
5070
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function only exists so "echo > free_buffer" does not error.
	 * The actual freeing happens in tracing_free_buffer_release().
	 */
	*ppos += cnt;

	return cnt;
}
5084
/* "free_buffer" close: optionally stop tracing, then shrink buffers to 0. */
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
5100
/*
 * "trace_marker" write: inject a user-space string into the ring buffer
 * as a TRACE_PRINT event, copying straight from the pinned user pages.
 */
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is.  By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can
	 * get access to the pages directly, and write the data straight
	 * into the ring buffer.
	 *
	 * NOTE(review): cnt == 0 would read entry->buf[cnt - 1] below;
	 * presumably zero-length writes never reach here — verify.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* room for a possible '\n' + NUL */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		/* data straddles a page boundary: copy both halves */
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	/* Ensure the record ends in '\n' and is NUL-terminated. */
	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
5205
5206static int tracing_clock_show(struct seq_file *m, void *v)
5207{
5208 struct trace_array *tr = m->private;
5209 int i;
5210
5211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5212 seq_printf(m,
5213 "%s%s%s%s", i ? " " : "",
5214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5215 i == tr->clock_id ? "]" : "");
5216 seq_putc(m, '\n');
5217
5218 return 0;
5219}
5220
/*
 * Switch @tr to the trace clock named @clockstr.  Buffers are reset
 * because timestamps from different clocks are not comparable.
 */
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
5254
/* "trace_clock" write: parse a clock name from user space and apply it. */
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
5282
5283static int tracing_clock_open(struct inode *inode, struct file *file)
5284{
5285 struct trace_array *tr = inode->i_private;
5286 int ret;
5287
5288 if (tracing_disabled)
5289 return -ENODEV;
5290
5291 if (trace_array_get(tr))
5292 return -ENODEV;
5293
5294 ret = single_open(file, tracing_clock_show, inode->i_private);
5295 if (ret < 0)
5296 trace_array_put(tr);
5297
5298 return ret;
5299}
5300
/* Per-open state for the binary per-cpu buffer files. */
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;	/* presumably a cached ring-buffer page — see readers (not in view) */
	unsigned int		read;	/* bytes already consumed from spare — TODO confirm */
};
5306
5307#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the "snapshot" file.  Read opens get a full trace
 * iterator over the snapshot; write-only opens get a minimal iterator
 * stored in a bare seq_file for tracing_snapshot_write().
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5347
/*
 * "snapshot" write: 0 frees the snapshot buffer, 1 allocates it (if
 * needed) and takes a snapshot, any other value clears the snapshot
 * contents.  Rejected while a latency tracer owns the max buffer.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
5421
/*
 * Release handler for "snapshot".  Read opens are torn down fully by
 * tracing_release(); write-only opens must free the seq_file and the
 * minimal iterator allocated in tracing_snapshot_open().
 */
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub holding the iterator */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}
5439
5440static int tracing_buffers_open(struct inode *inode, struct file *filp);
5441static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5442 size_t count, loff_t *ppos);
5443static int tracing_buffers_release(struct inode *inode, struct file *file);
5444static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5445 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5446
/*
 * Open handler for "snapshot_raw": a buffers-file open redirected at
 * the max (snapshot) buffer.  Refused while the current tracer itself
 * uses the max buffer.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
5468
5469#endif
5470
5471
/* tracefs "tracing_thresh": latency threshold in usecs. */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
/* tracefs "tracing_max_latency": max recorded latency in usecs. */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

/* tracefs "current_tracer": view/switch the active tracer. */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

/* tracefs "trace_pipe": blocking, consuming trace reader. */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

/* tracefs "buffer_size_kb": per-cpu ring buffer size. */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* tracefs "buffer_total_size_kb": sum over all CPUs (read only). */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* tracefs "free_buffer": shrink the ring buffer to zero on close. */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

/* tracefs "trace_marker": inject user strings into the trace. */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* tracefs "trace_clock": select the trace timestamp clock. */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
/* tracefs "snapshot": take/free/clear snapshots, read them back. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

/* tracefs "snapshot_raw": binary page-level access to the snapshot. */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
5558
/*
 * Open a per-cpu "trace_pipe_raw" file.  Allocates an ftrace_buffer_info
 * describing which trace array, cpu, and buffer this reader consumes, and
 * takes references that prevent the trace array and the current tracer
 * from being removed while the file is open.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the trace array; dropped on release (or on failure below). */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force a new read page on the first read (see tracing_buffers_read). */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	/* Keep the current tracer alive while this reader exists. */
	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5599
5600static unsigned int
5601tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5602{
5603 struct ftrace_buffer_info *info = filp->private_data;
5604 struct trace_iterator *iter = &info->iter;
5605
5606 return trace_poll(iter, filp, poll_table);
5607}
5608
/*
 * Read raw ring-buffer pages from a per-cpu buffer.  Data is handed to
 * user space a page at a time via a "spare" page swapped with the ring
 * buffer; partial reads continue from info->read within that page.
 * Blocks (unless O_NONBLOCK) when the buffer is empty.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot buffer is owned by a max-latency tracer right now. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have leftover data from a previous read of this page? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			/* Wait for new data, then retry the page read. */
			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	/* copy_to_user() returns the number of bytes NOT copied. */
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
5675
/*
 * Release a "trace_pipe_raw" file: drop the tracer and trace-array
 * references taken at open time and free the spare read page.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5695
/*
 * Reference-counted handle to a ring-buffer page handed to the pipe
 * layer by tracing_buffers_splice_read().  The page is returned to the
 * ring buffer when the last reference is dropped.
 */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* owning ring buffer */
	void			*page;		/* borrowed read page */
	int			ref;		/* plain refcount (pipe-side) */
};
5701
/* Drop one pipe-buffer reference; free the page on the last one. */
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

/*
 * Take one pipe-buffer reference.
 * NOTE(review): plain int increment with no overflow guard — later
 * kernels hardened this path; confirm whether that matters here.
 */
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for spliced ring-buffer pages. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
5731
5732
5733
5734
5735
/*
 * splice_pipe_desc release callback: drop the reference held by the
 * spd for page i (pages not consumed by splice_to_pipe()).  Mirrors
 * buffer_pipe_buf_release().
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
5748
5749static ssize_t
5750tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5751 struct pipe_inode_info *pipe, size_t len,
5752 unsigned int flags)
5753{
5754 struct ftrace_buffer_info *info = file->private_data;
5755 struct trace_iterator *iter = &info->iter;
5756 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5757 struct page *pages_def[PIPE_DEF_BUFFERS];
5758 struct splice_pipe_desc spd = {
5759 .pages = pages_def,
5760 .partial = partial_def,
5761 .nr_pages_max = PIPE_DEF_BUFFERS,
5762 .flags = flags,
5763 .ops = &buffer_pipe_buf_ops,
5764 .spd_release = buffer_spd_release,
5765 };
5766 struct buffer_ref *ref;
5767 int entries, size, i;
5768 ssize_t ret = 0;
5769
5770#ifdef CONFIG_TRACER_MAX_TRACE
5771 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5772 return -EBUSY;
5773#endif
5774
5775 if (splice_grow_spd(pipe, &spd))
5776 return -ENOMEM;
5777
5778 if (*ppos & (PAGE_SIZE - 1))
5779 return -EINVAL;
5780
5781 if (len & (PAGE_SIZE - 1)) {
5782 if (len < PAGE_SIZE)
5783 return -EINVAL;
5784 len &= PAGE_MASK;
5785 }
5786
5787 again:
5788 trace_access_lock(iter->cpu_file);
5789 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5790
5791 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5792 struct page *page;
5793 int r;
5794
5795 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5796 if (!ref) {
5797 ret = -ENOMEM;
5798 break;
5799 }
5800
5801 ref->ref = 1;
5802 ref->buffer = iter->trace_buffer->buffer;
5803 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5804 if (!ref->page) {
5805 ret = -ENOMEM;
5806 kfree(ref);
5807 break;
5808 }
5809
5810 r = ring_buffer_read_page(ref->buffer, &ref->page,
5811 len, iter->cpu_file, 1);
5812 if (r < 0) {
5813 ring_buffer_free_read_page(ref->buffer, ref->page);
5814 kfree(ref);
5815 break;
5816 }
5817
5818
5819
5820
5821
5822 size = ring_buffer_page_len(ref->page);
5823 if (size < PAGE_SIZE)
5824 memset(ref->page + size, 0, PAGE_SIZE - size);
5825
5826 page = virt_to_page(ref->page);
5827
5828 spd.pages[i] = page;
5829 spd.partial[i].len = PAGE_SIZE;
5830 spd.partial[i].offset = 0;
5831 spd.partial[i].private = (unsigned long)ref;
5832 spd.nr_pages++;
5833 *ppos += PAGE_SIZE;
5834
5835 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5836 }
5837
5838 trace_access_unlock(iter->cpu_file);
5839 spd.nr_pages = i;
5840
5841
5842 if (!spd.nr_pages) {
5843 if (ret)
5844 return ret;
5845
5846 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5847 return -EAGAIN;
5848
5849 ret = wait_on_pipe(iter, true);
5850 if (ret)
5851 return ret;
5852
5853 goto again;
5854 }
5855
5856 ret = splice_to_pipe(pipe, &spd);
5857 splice_shrink_spd(&spd);
5858
5859 return ret;
5860}
5861
/* tracefs per-cpu "trace_pipe_raw": binary page-at-a-time buffer reader. */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5870
/*
 * Read handler for the per-cpu "stats" file: formats entry counts,
 * overruns, byte counts, timestamps and event counters for one cpu's
 * ring buffer into a trace_seq and copies it to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global clock in nanoseconds: print as sec.usec */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode: print the raw timestamp values */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
5934
/* tracefs per-cpu "stats": text dump of ring-buffer statistics. */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5941
5942#ifdef CONFIG_DYNAMIC_FTRACE
5943
/*
 * Architectures may override this to append arch-specific dynamic
 * ftrace information; the default contributes nothing.
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

/*
 * Read handler for "dyn_ftrace_total_info": prints the counter passed
 * via private_data plus any arch-specific text.  A static buffer is
 * used, serialized by a local mutex.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* leave room for the trailing newline appended below */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

/* tracefs "dyn_ftrace_total_info": dynamic ftrace accounting (read only). */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
5978#endif
5979
5980#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Function-probe callback: take a snapshot every time the probe fires. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
5986
/*
 * Function-probe callback with a countdown: take a snapshot until the
 * count stored in *data reaches zero (-1UL means unlimited).
 *
 * Fix: the cast was (long *) while the variable is unsigned long *,
 * a pointer-type mismatch the compiler warns about; use the matching
 * cast.  Behavior is unchanged.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
6000
/*
 * Show one registered snapshot probe as "<func>:snapshot[:count=N]"
 * (":unlimited" when no countdown was given).
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long remaining = (long)data;

	seq_printf(m, "%ps:snapshot", (void *)ip);

	if (remaining == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", remaining);

	return 0;
}
6018
/* Probe ops for "func:snapshot" with no count (fire every time). */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

/* Probe ops for "func:snapshot:count=N" (fire N times). */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
6028
6029static int
6030ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6031 char *glob, char *cmd, char *param, int enable)
6032{
6033 struct ftrace_probe_ops *ops;
6034 void *count = (void *)-1;
6035 char *number;
6036 int ret;
6037
6038
6039 if (!enable)
6040 return -EINVAL;
6041
6042 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6043
6044 if (glob[0] == '!') {
6045 unregister_ftrace_function_probe_func(glob+1, ops);
6046 return 0;
6047 }
6048
6049 if (!param)
6050 goto out_reg;
6051
6052 number = strsep(¶m, ":");
6053
6054 if (!strlen(number))
6055 goto out_reg;
6056
6057
6058
6059
6060
6061 ret = kstrtoul(number, 0, (unsigned long *)&count);
6062 if (ret)
6063 return ret;
6064
6065 out_reg:
6066 ret = register_ftrace_function_probe(glob, ops, count);
6067
6068 if (ret >= 0)
6069 alloc_snapshot(&global_trace);
6070
6071 return ret < 0 ? ret : 0;
6072}
6073
/* The "snapshot" ftrace filter command, parsed by the callback above. */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

/* Register the snapshot command at boot (snapshot + dynamic ftrace only). */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
/* No-op when snapshot or dynamic ftrace support is compiled out. */
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif
6086
/*
 * Return the tracefs parent dentry for a trace array.  The top-level
 * (global) trace array lives at the tracefs root, which tracefs
 * represents as NULL; instances return their own directory.
 */
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}
6099
/*
 * Return (creating on first use) the "per_cpu" directory for a trace
 * array.  Returns NULL on failure; a warning is emitted once.
 */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
6118
/*
 * Create a per-cpu tracefs file.  The cpu number is stashed (biased by
 * one so cpu 0 is distinguishable from "no cpu") in the inode's i_cdev
 * field, where tracing_get_cpu() retrieves it.
 */
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}
6129
/*
 * Populate the per_cpu/cpuN directory for one cpu of a trace array
 * with its pipe, trace, raw, stats, size and snapshot files.
 */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
6172
6173#ifdef CONFIG_FTRACE_SELFTEST
6174
6175#include "trace_selftest.c"
6176#endif
6177
6178static ssize_t
6179trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6180 loff_t *ppos)
6181{
6182 struct trace_option_dentry *topt = filp->private_data;
6183 char *buf;
6184
6185 if (topt->flags->val & topt->opt->bit)
6186 buf = "1\n";
6187 else
6188 buf = "0\n";
6189
6190 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6191}
6192
/*
 * Write a tracer-specific option file: accepts "0" or "1" and flips the
 * option via the tracer's set_flag callback when the value changes.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only call the tracer when the value actually changes. */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
6221
6222
/* tracefs options/<opt>: per-tracer option toggles. */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
6229
6230
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
/*
 * Recover the trace_array and option index from a pointer into the
 * array's trace_flags_index[] table.  Each slot i of that table holds
 * the value i, so *data is the index, and subtracting it from the
 * pointer yields the start of the table inside the trace_array.
 * (Relies on GCC's void-pointer arithmetic extension.)
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
6262
6263static ssize_t
6264trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6265 loff_t *ppos)
6266{
6267 void *tr_index = filp->private_data;
6268 struct trace_array *tr;
6269 unsigned int index;
6270 char *buf;
6271
6272 get_tr_index(tr_index, &tr, &index);
6273
6274 if (tr->trace_flags & (1 << index))
6275 buf = "1\n";
6276 else
6277 buf = "0\n";
6278
6279 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6280}
6281
/*
 * Write a core trace option file: accepts "0" or "1" and updates the
 * flag via set_tracer_flag() under trace_types_lock.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6312
/* tracefs options/<flag>: core (tracer-independent) option toggles. */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6319
/*
 * Wrapper around tracefs_create_file() that warns on failure.  Returns
 * the new dentry or NULL (callers treat NULL as a non-fatal miss).
 */
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}
6334
6335
/*
 * Return (creating on first use) the "options" directory for a trace
 * array.  Returns NULL on failure.
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6355
/*
 * Create one options/<opt> file for a tracer-specific option, binding
 * the topt descriptor to the tracer's flags and this trace array.
 */
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}
6376
/*
 * Create the options/ files for every option of a tracer in a trace
 * array, recording the allocation in tr->topts so instance_rmdir() can
 * free it.  Skips tracers whose flags have already been added (tracers
 * may share a tracer_flags struct) and tracers not valid for this array.
 */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	/* Count the options to size the topts array (plus a terminator). */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
6437
/*
 * Create one options/<flag> file for a core trace option.  The file's
 * private data points into tr->trace_flags_index so the handlers can
 * recover both the array and the bit (see get_tr_index()).
 */
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}
6452
/*
 * Create the options/ directory and core option files for a trace
 * array.  Options flagged top-level only are skipped for instances.
 */
static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
6469
6470static ssize_t
6471rb_simple_read(struct file *filp, char __user *ubuf,
6472 size_t cnt, loff_t *ppos)
6473{
6474 struct trace_array *tr = filp->private_data;
6475 char buf[64];
6476 int r;
6477
6478 r = tracer_tracing_is_on(tr);
6479 r = sprintf(buf, "%d\n", r);
6480
6481 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6482}
6483
/*
 * Write the "tracing_on" file: any nonzero value turns recording on and
 * calls the tracer's start hook; zero turns it off and calls stop.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
6515
/* tracefs "tracing_on": master recording switch for a trace array. */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6523
6524struct dentry *trace_instance_dir;
6525
6526static void
6527init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6528
/*
 * Allocate one trace buffer (ring buffer plus per-cpu data) of the
 * given size for a trace array.  Returns 0 or -ENOMEM.
 */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): this reads tr->trace_buffer rather than the buf
	 * being allocated, even when called for the max buffer — looks
	 * intentional (seed entry counts from the live buffer) but worth
	 * confirming.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
6554
/*
 * Allocate the trace buffer(s) for a trace array: the main buffer at
 * the requested size and, with CONFIG_TRACER_MAX_TRACE, the max buffer
 * (full size only if a boot-time snapshot was requested, else minimal).
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
6581
6582static void free_trace_buffer(struct trace_buffer *buf)
6583{
6584 if (buf->buffer) {
6585 ring_buffer_free(buf->buffer);
6586 buf->buffer = NULL;
6587 free_percpu(buf->data);
6588 buf->data = NULL;
6589 }
6590}
6591
/* Free all buffers (main and, if configured, max) of a trace array. */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6603
/*
 * Fill trace_flags_index[] with the identity mapping; get_tr_index()
 * later uses each slot's value to recover both the option index and
 * the owning trace_array from a pointer into this table.
 */
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}
6612
/*
 * Create option files for every registered tracer in this trace array.
 * Caller must hold trace_types_lock.
 */
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

/* Locked wrapper around __update_tracer_options(). */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
6627
/*
 * tracefs instances/ mkdir callback: create a new named trace array
 * with its own buffers, directory tree, and event files.  Returns 0,
 * -EEXIST for a duplicate name, or -ENOMEM on allocation failure
 * (everything allocated so far is unwound at out_free_tr).
 */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances inherit the global option flags. */
	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* These free/kfree calls all tolerate NULL/unallocated members. */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6701
/*
 * tracefs instances/ rmdir callback: tear down a named trace array.
 * Fails with -ENODEV if the name is unknown and -EBUSY while the array
 * or its current tracer still has users (open files).
 */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	/* Free the per-tracer option descriptors created for this array. */
	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6748
/*
 * Create the "instances" directory, wiring its mkdir/rmdir to the
 * instance create/destroy callbacks above.
 */
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
6757
/*
 * Populate a trace array's tracefs directory with all its control and
 * output files, the options directory, and the per-cpu subtree.
 */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}
6818
6819static struct vfsmount *trace_automount(void *ingore)
6820{
6821 struct vfsmount *mnt;
6822 struct file_system_type *type;
6823
6824
6825
6826
6827
6828
6829 type = get_fs_type("tracefs");
6830 if (!type)
6831 return NULL;
6832 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6833 put_filesystem(type);
6834 if (IS_ERR(mnt))
6835 return NULL;
6836 mntget(mnt);
6837
6838 return mnt;
6839}
6840
6841
6842
6843
6844
6845
6846
6847
6848struct dentry *tracing_init_dentry(void)
6849{
6850 struct trace_array *tr = &global_trace;
6851
6852
6853 if (tr->dir)
6854 return NULL;
6855
6856 if (WARN_ON(!tracefs_initialized()) ||
6857 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6858 WARN_ON(!debugfs_initialized())))
6859 return ERR_PTR(-ENODEV);
6860
6861
6862
6863
6864
6865
6866
6867 tr->dir = debugfs_create_automount("tracing", NULL,
6868 trace_automount, NULL);
6869 if (!tr->dir) {
6870 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6871 return ERR_PTR(-ENOMEM);
6872 }
6873
6874 return NULL;
6875}
6876
6877extern struct trace_enum_map *__start_ftrace_enum_maps[];
6878extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6879
6880static void __init trace_enum_init(void)
6881{
6882 int len;
6883
6884 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6885 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6886}
6887
6888#ifdef CONFIG_MODULES
6889static void trace_module_add_enums(struct module *mod)
6890{
6891 if (!mod->num_trace_enums)
6892 return;
6893
6894
6895
6896
6897
6898 if (trace_module_has_bad_taint(mod))
6899 return;
6900
6901 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6902}
6903
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * Remove @mod's saved enum maps from the global trace_enum_maps list.
 *
 * A module's maps form one contiguous segment of the list: a head item
 * whose ->head.mod identifies the module, followed by entries, ended by
 * a tail item (reached via trace_enum_jmp_to_tail()).  The whole
 * segment is unlinked and released with a single kfree() — presumably
 * it was allocated as one array by the insert path; TODO confirm.
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		/* Not ours: hop over this segment via its tail item */
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/* Splice our segment out of the list, then free it */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif
6935
6936static int trace_module_notify(struct notifier_block *self,
6937 unsigned long val, void *data)
6938{
6939 struct module *mod = data;
6940
6941 switch (val) {
6942 case MODULE_STATE_COMING:
6943 trace_module_add_enums(mod);
6944 break;
6945 case MODULE_STATE_GOING:
6946 trace_module_remove_enums(mod);
6947 break;
6948 }
6949
6950 return 0;
6951}
6952
/* Hooks module load/unload to keep the enum map list in sync */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
6957#endif
6958
/*
 * Boot-time creation of the top-level tracefs files, enum maps,
 * module notifier and trace instances.  Registered as an fs_initcall.
 * Returns 0 even when tracefs is unavailable (tracing just stays off).
 */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	/* Standard control files for the global trace array */
	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	/* Built-in enum maps first, then the file exposing them */
	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
7002
/*
 * Panic notifier callback: dump the ftrace ring buffer to the console
 * if the ftrace_dump_on_oops option is set.
 */
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}
7010
/* Registered on panic_notifier_list in tracer_alloc_buffers() */
static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150
};
7016
7017static int trace_die_handler(struct notifier_block *self,
7018 unsigned long val,
7019 void *data)
7020{
7021 switch (val) {
7022 case DIE_OOPS:
7023 if (ftrace_dump_on_oops)
7024 ftrace_dump(ftrace_dump_on_oops);
7025 break;
7026 default:
7027 break;
7028 }
7029 return NOTIFY_OK;
7030}
7031
/* Registered via register_die_notifier() in tracer_alloc_buffers() */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
7036
7037
7038
7039
7040
/* Cap on how many bytes of a seq buffer one printk below will emit */
#define TRACE_MAX_PRINT 1000

/* Dump output goes out at the highest console log level */
#define KERN_TRACE KERN_EMERG

/*
 * Print the accumulated contents of @s to the console and reset it.
 * Used by ftrace_dump() to emit one trace line at a time.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * Paranoia: len should never reach size (TRACE_MAX_PRINT is
	 * expected to be well below the buffer size), but if it somehow
	 * does, clamp it so the NUL store below stays in bounds.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* NUL-terminate so the buffer can be printed with %s */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
7072
/*
 * Initialize @iter to walk the global trace buffer across all CPUs.
 * Used by the console dump path (ftrace_dump) instead of the normal
 * file-open paths.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Give the current tracer a chance to set up the iterator */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate output when the ring buffer has lost (overrun) events */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Print timestamps in nanoseconds only for clocks counting in ns */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
7091
/*
 * ftrace_dump - dump the trace buffers to the console
 * @oops_dump_mode: DUMP_ALL (every cpu), DUMP_ORIG (current cpu only)
 *                  or DUMP_NONE (just disable tracing)
 *
 * Called from panic/oops paths, so it avoids allocation (static
 * iterator), runs with local irqs off, and permits only one dump
 * at a time.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* Static: the iterator is too large for an (oops-time) stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one concurrent dumper; later callers just bail */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Stop recording before dumping so the buffer is not overwritten
	 * while we read it.  Note: tracing stays off afterwards; the user
	 * can re-enable it via tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Set up a fresh iterator over the global buffer */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* Don't touch user-space memory while in this (crash) context */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Warn if the function tracer itself was shut down */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * Walk the buffer entry by entry, printing each one through the
	 * reusable seq buffer in the iterator.
	 */
	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* Reset the iterator fields from .seq onward between lines */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* Dumping can take a while; keep the NMI watchdog quiet */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore the user-symbol flag and per-cpu recording state */
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
7202
/*
 * Allocate the global trace buffers and bring the tracing core up at
 * boot.  On failure, unwinds every allocation made so far via the goto
 * cleanup chain and returns a negative errno.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Compile-time check that the number of trace option flags
	 * still fits in the flags bitmask.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only set up trace_printk buffers if any trace_printk site exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)

		trace_printk_init_buffers();

	/* Keep the buffer minimal (1 page) until it is explicitly expanded */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* One-page scratch ring buffer (used elsewhere via temp_buffer) */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* Allocate the actual per-cpu trace buffers for the global array */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * current_trace must be valid before anything registers, so
	 * bootstrap it with the nop tracer here.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All setup succeeded: allow tracing to be used */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
7307
/*
 * Early tracing initialization: allocate the trace buffers and
 * initialize the trace event subsystem.
 */
void __init trace_init(void)
{
	/*
	 * The tracepoint_printk boot option needs an iterator; if the
	 * allocation fails, disable the option rather than crash later.
	 */
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
7319
7320__init static int clear_boot_tracer(void)
7321{
7322
7323
7324
7325
7326
7327
7328
7329 if (!default_bootup_tracer)
7330 return 0;
7331
7332 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7333 default_bootup_tracer);
7334 default_bootup_tracer = NULL;
7335
7336 return 0;
7337}
7338
7339fs_initcall(tracer_init_tracefs);
7340late_initcall(clear_boot_tracer);
7341