1
2
3
4
5
6#include <linux/ring_buffer.h>
7#include <linux/trace_clock.h>
8#include <linux/ftrace_irq.h>
9#include <linux/spinlock.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
12#include <linux/hardirq.h>
13#include <linux/kmemcheck.h>
14#include <linux/module.h>
15#include <linux/percpu.h>
16#include <linux/mutex.h>
17#include <linux/init.h>
18#include <linux/hash.h>
19#include <linux/list.h>
20#include <linux/cpu.h>
21#include <linux/fs.h>
22
23#include "trace.h"
24
25
26
27
28int ring_buffer_print_entry_header(struct trace_seq *s)
29{
30 int ret;
31
32 ret = trace_seq_printf(s, "# compressed entry header\n");
33 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
34 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
35 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
36 ret = trace_seq_printf(s, "\n");
37 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
38 RINGBUF_TYPE_PADDING);
39 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40 RINGBUF_TYPE_TIME_EXTEND);
41 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
42 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
43
44 return ret;
45}
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144enum {
145 RB_BUFFERS_ON_BIT = 0,
146 RB_BUFFERS_DISABLED_BIT = 1,
147};
148
149enum {
150 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
151 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
152};
153
154static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
155
156#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
158
159
160
161
162
163
164void tracing_on(void)
165{
166 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
167}
168EXPORT_SYMBOL_GPL(tracing_on);
169
170
171
172
173
174
175
176
177
178void tracing_off(void)
179{
180 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181}
182EXPORT_SYMBOL_GPL(tracing_off);
183
184
185
186
187
188
189
190void tracing_off_permanent(void)
191{
192 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
193}
194
195
196
197
198int tracing_is_on(void)
199{
200 return ring_buffer_flags == RB_BUFFERS_ON;
201}
202EXPORT_SYMBOL_GPL(tracing_is_on);
203
204#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
205#define RB_ALIGNMENT 4U
206#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
207#define RB_EVNT_MIN_SIZE 8U
208
209
210#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
211
212enum {
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
215};
216
217static inline int rb_null_event(struct ring_buffer_event *event)
218{
219 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
220}
221
222static void rb_event_set_padding(struct ring_buffer_event *event)
223{
224
225 event->type_len = RINGBUF_TYPE_PADDING;
226 event->time_delta = 0;
227}
228
229static unsigned
230rb_event_data_length(struct ring_buffer_event *event)
231{
232 unsigned length;
233
234 if (event->type_len)
235 length = event->type_len * RB_ALIGNMENT;
236 else
237 length = event->array[0];
238 return length + RB_EVNT_HDR_SIZE;
239}
240
241
242static unsigned
243rb_event_length(struct ring_buffer_event *event)
244{
245 switch (event->type_len) {
246 case RINGBUF_TYPE_PADDING:
247 if (rb_null_event(event))
248
249 return -1;
250 return event->array[0] + RB_EVNT_HDR_SIZE;
251
252 case RINGBUF_TYPE_TIME_EXTEND:
253 return RB_LEN_TIME_EXTEND;
254
255 case RINGBUF_TYPE_TIME_STAMP:
256 return RB_LEN_TIME_STAMP;
257
258 case RINGBUF_TYPE_DATA:
259 return rb_event_data_length(event);
260 default:
261 BUG();
262 }
263
264 return 0;
265}
266
267
268
269
270
271unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272{
273 unsigned length = rb_event_length(event);
274 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
275 return length;
276 length -= RB_EVNT_HDR_SIZE;
277 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
278 length -= sizeof(event->array[0]);
279 return length;
280}
281EXPORT_SYMBOL_GPL(ring_buffer_event_length);
282
283
284static void *
285rb_event_data(struct ring_buffer_event *event)
286{
287 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
288
289 if (event->type_len)
290 return (void *)&event->array[0];
291
292 return (void *)&event->array[1];
293}
294
295
296
297
298
299void *ring_buffer_event_data(struct ring_buffer_event *event)
300{
301 return rb_event_data(event);
302}
303EXPORT_SYMBOL_GPL(ring_buffer_event_data);
304
305#define for_each_buffer_cpu(buffer, cpu) \
306 for_each_cpu(cpu, buffer->cpumask)
307
308#define TS_SHIFT 27
309#define TS_MASK ((1ULL << TS_SHIFT) - 1)
310#define TS_DELTA_TEST (~TS_MASK)
311
312struct buffer_data_page {
313 u64 time_stamp;
314 local_t commit;
315 unsigned char data[];
316};
317
318
319
320
321
322
323
324
325
326struct buffer_page {
327 struct list_head list;
328 local_t write;
329 unsigned read;
330 local_t entries;
331 struct buffer_data_page *page;
332};
333
334
335
336
337
338
339
340
341
342
343
344
345
346#define RB_WRITE_MASK 0xfffff
347#define RB_WRITE_INTCNT (1 << 20)
348
349static void rb_init_page(struct buffer_data_page *bpage)
350{
351 local_set(&bpage->commit, 0);
352}
353
354
355
356
357
358
359
360size_t ring_buffer_page_len(void *page)
361{
362 return local_read(&((struct buffer_data_page *)page)->commit)
363 + BUF_PAGE_HDR_SIZE;
364}
365
366
367
368
369
370static void free_buffer_page(struct buffer_page *bpage)
371{
372 free_page((unsigned long)bpage->page);
373 kfree(bpage);
374}
375
376
377
378
379static inline int test_time_stamp(u64 delta)
380{
381 if (delta & TS_DELTA_TEST)
382 return 1;
383 return 0;
384}
385
386#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
387
388
389#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
390
391
392#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
393
394int ring_buffer_print_page_header(struct trace_seq *s)
395{
396 struct buffer_data_page field;
397 int ret;
398
399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
400 "offset:0;\tsize:%u;\n",
401 (unsigned int)sizeof(field.time_stamp));
402
403 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
404 "offset:%u;\tsize:%u;\n",
405 (unsigned int)offsetof(typeof(field), commit),
406 (unsigned int)sizeof(field.commit));
407
408 ret = trace_seq_printf(s, "\tfield: char data;\t"
409 "offset:%u;\tsize:%u;\n",
410 (unsigned int)offsetof(typeof(field), data),
411 (unsigned int)BUF_PAGE_SIZE);
412
413 return ret;
414}
415
416
417
418
419struct ring_buffer_per_cpu {
420 int cpu;
421 struct ring_buffer *buffer;
422 spinlock_t reader_lock;
423 raw_spinlock_t lock;
424 struct lock_class_key lock_key;
425 struct list_head *pages;
426 struct buffer_page *head_page;
427 struct buffer_page *tail_page;
428 struct buffer_page *commit_page;
429 struct buffer_page *reader_page;
430 local_t commit_overrun;
431 local_t overrun;
432 local_t entries;
433 local_t committing;
434 local_t commits;
435 unsigned long read;
436 u64 write_stamp;
437 u64 read_stamp;
438 atomic_t record_disabled;
439};
440
441struct ring_buffer {
442 unsigned pages;
443 unsigned flags;
444 int cpus;
445 atomic_t record_disabled;
446 cpumask_var_t cpumask;
447
448 struct lock_class_key *reader_lock_key;
449
450 struct mutex mutex;
451
452 struct ring_buffer_per_cpu **buffers;
453
454#ifdef CONFIG_HOTPLUG_CPU
455 struct notifier_block cpu_notify;
456#endif
457 u64 (*clock)(void);
458};
459
460struct ring_buffer_iter {
461 struct ring_buffer_per_cpu *cpu_buffer;
462 unsigned long head;
463 struct buffer_page *head_page;
464 u64 read_stamp;
465};
466
467
468#define RB_WARN_ON(b, cond) \
469 ({ \
470 int _____ret = unlikely(cond); \
471 if (_____ret) { \
472 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
473 struct ring_buffer_per_cpu *__b = \
474 (void *)b; \
475 atomic_inc(&__b->buffer->record_disabled); \
476 } else \
477 atomic_inc(&b->record_disabled); \
478 WARN_ON(1); \
479 } \
480 _____ret; \
481 })
482
483
484#define DEBUG_SHIFT 0
485
486static inline u64 rb_time_stamp(struct ring_buffer *buffer)
487{
488
489 return buffer->clock() << DEBUG_SHIFT;
490}
491
492u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
493{
494 u64 time;
495
496 preempt_disable_notrace();
497 time = rb_time_stamp(buffer);
498 preempt_enable_no_resched_notrace();
499
500 return time;
501}
502EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
503
504void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
505 int cpu, u64 *ts)
506{
507
508 *ts >>= DEBUG_SHIFT;
509}
510EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581#define RB_PAGE_NORMAL 0UL
582#define RB_PAGE_HEAD 1UL
583#define RB_PAGE_UPDATE 2UL
584
585
586#define RB_FLAG_MASK 3UL
587
588
589#define RB_PAGE_MOVED 4UL
590
591
592
593
594static struct list_head *rb_list_head(struct list_head *list)
595{
596 unsigned long val = (unsigned long)list;
597
598 return (struct list_head *)(val & ~RB_FLAG_MASK);
599}
600
601
602
603
604
605
606
607
608
609static int inline
610rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
611 struct buffer_page *page, struct list_head *list)
612{
613 unsigned long val;
614
615 val = (unsigned long)list->next;
616
617 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
618 return RB_PAGE_MOVED;
619
620 return val & RB_FLAG_MASK;
621}
622
623
624
625
626
627
628
629
630static int rb_is_reader_page(struct buffer_page *page)
631{
632 struct list_head *list = page->list.prev;
633
634 return rb_list_head(list->next) != &page->list;
635}
636
637
638
639
640static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
641 struct list_head *list)
642{
643 unsigned long *ptr;
644
645 ptr = (unsigned long *)&list->next;
646 *ptr |= RB_PAGE_HEAD;
647 *ptr &= ~RB_PAGE_UPDATE;
648}
649
650
651
652
653static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
654{
655 struct buffer_page *head;
656
657 head = cpu_buffer->head_page;
658 if (!head)
659 return;
660
661
662
663
664 rb_set_list_to_head(cpu_buffer, head->list.prev);
665}
666
667static void rb_list_head_clear(struct list_head *list)
668{
669 unsigned long *ptr = (unsigned long *)&list->next;
670
671 *ptr &= ~RB_FLAG_MASK;
672}
673
674
675
676
677static void
678rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
679{
680 struct list_head *hd;
681
682
683 rb_list_head_clear(cpu_buffer->pages);
684
685 list_for_each(hd, cpu_buffer->pages)
686 rb_list_head_clear(hd);
687}
688
689static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
690 struct buffer_page *head,
691 struct buffer_page *prev,
692 int old_flag, int new_flag)
693{
694 struct list_head *list;
695 unsigned long val = (unsigned long)&head->list;
696 unsigned long ret;
697
698 list = &prev->list;
699
700 val &= ~RB_FLAG_MASK;
701
702 ret = cmpxchg((unsigned long *)&list->next,
703 val | old_flag, val | new_flag);
704
705
706 if ((ret & ~RB_FLAG_MASK) != val)
707 return RB_PAGE_MOVED;
708
709 return ret & RB_FLAG_MASK;
710}
711
712static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
713 struct buffer_page *head,
714 struct buffer_page *prev,
715 int old_flag)
716{
717 return rb_head_page_set(cpu_buffer, head, prev,
718 old_flag, RB_PAGE_UPDATE);
719}
720
721static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
722 struct buffer_page *head,
723 struct buffer_page *prev,
724 int old_flag)
725{
726 return rb_head_page_set(cpu_buffer, head, prev,
727 old_flag, RB_PAGE_HEAD);
728}
729
730static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
731 struct buffer_page *head,
732 struct buffer_page *prev,
733 int old_flag)
734{
735 return rb_head_page_set(cpu_buffer, head, prev,
736 old_flag, RB_PAGE_NORMAL);
737}
738
739static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
740 struct buffer_page **bpage)
741{
742 struct list_head *p = rb_list_head((*bpage)->list.next);
743
744 *bpage = list_entry(p, struct buffer_page, list);
745}
746
747static struct buffer_page *
748rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
749{
750 struct buffer_page *head;
751 struct buffer_page *page;
752 struct list_head *list;
753 int i;
754
755 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
756 return NULL;
757
758
759 list = cpu_buffer->pages;
760 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
761 return NULL;
762
763 page = head = cpu_buffer->head_page;
764
765
766
767
768
769
770 for (i = 0; i < 3; i++) {
771 do {
772 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
773 cpu_buffer->head_page = page;
774 return page;
775 }
776 rb_inc_page(cpu_buffer, &page);
777 } while (page != head);
778 }
779
780 RB_WARN_ON(cpu_buffer, 1);
781
782 return NULL;
783}
784
785static int rb_head_page_replace(struct buffer_page *old,
786 struct buffer_page *new)
787{
788 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
789 unsigned long val;
790 unsigned long ret;
791
792 val = *ptr & ~RB_FLAG_MASK;
793 val |= RB_PAGE_HEAD;
794
795 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
796
797 return ret == val;
798}
799
800
801
802
803
804
805static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
806 struct buffer_page *tail_page,
807 struct buffer_page *next_page)
808{
809 struct buffer_page *old_tail;
810 unsigned long old_entries;
811 unsigned long old_write;
812 int ret = 0;
813
814
815
816
817
818
819
820
821
822
823 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
824 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
825
826
827
828
829
830 barrier();
831
832
833
834
835
836
837 if (tail_page == cpu_buffer->tail_page) {
838
839 unsigned long val = old_write & ~RB_WRITE_MASK;
840 unsigned long eval = old_entries & ~RB_WRITE_MASK;
841
842
843
844
845
846
847
848
849
850
851
852 (void)local_cmpxchg(&next_page->write, old_write, val);
853 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
854
855
856
857
858
859
860 local_set(&next_page->page->commit, 0);
861
862 old_tail = cmpxchg(&cpu_buffer->tail_page,
863 tail_page, next_page);
864
865 if (old_tail == tail_page)
866 ret = 1;
867 }
868
869 return ret;
870}
871
872static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
873 struct buffer_page *bpage)
874{
875 unsigned long val = (unsigned long)bpage;
876
877 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
878 return 1;
879
880 return 0;
881}
882
883
884
885
886static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
887 struct list_head *list)
888{
889 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
890 return 1;
891 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
892 return 1;
893 return 0;
894}
895
896
897
898
899
900
901
902
903static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
904{
905 struct list_head *head = cpu_buffer->pages;
906 struct buffer_page *bpage, *tmp;
907
908 rb_head_page_deactivate(cpu_buffer);
909
910 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
911 return -1;
912 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
913 return -1;
914
915 if (rb_check_list(cpu_buffer, head))
916 return -1;
917
918 list_for_each_entry_safe(bpage, tmp, head, list) {
919 if (RB_WARN_ON(cpu_buffer,
920 bpage->list.next->prev != &bpage->list))
921 return -1;
922 if (RB_WARN_ON(cpu_buffer,
923 bpage->list.prev->next != &bpage->list))
924 return -1;
925 if (rb_check_list(cpu_buffer, &bpage->list))
926 return -1;
927 }
928
929 rb_head_page_activate(cpu_buffer);
930
931 return 0;
932}
933
934static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
935 unsigned nr_pages)
936{
937 struct buffer_page *bpage, *tmp;
938 unsigned long addr;
939 LIST_HEAD(pages);
940 unsigned i;
941
942 WARN_ON(!nr_pages);
943
944 for (i = 0; i < nr_pages; i++) {
945 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
946 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
947 if (!bpage)
948 goto free_pages;
949
950 rb_check_bpage(cpu_buffer, bpage);
951
952 list_add(&bpage->list, &pages);
953
954 addr = __get_free_page(GFP_KERNEL);
955 if (!addr)
956 goto free_pages;
957 bpage->page = (void *)addr;
958 rb_init_page(bpage->page);
959 }
960
961
962
963
964
965
966 cpu_buffer->pages = pages.next;
967 list_del(&pages);
968
969 rb_check_pages(cpu_buffer);
970
971 return 0;
972
973 free_pages:
974 list_for_each_entry_safe(bpage, tmp, &pages, list) {
975 list_del_init(&bpage->list);
976 free_buffer_page(bpage);
977 }
978 return -ENOMEM;
979}
980
981static struct ring_buffer_per_cpu *
982rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
983{
984 struct ring_buffer_per_cpu *cpu_buffer;
985 struct buffer_page *bpage;
986 unsigned long addr;
987 int ret;
988
989 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
990 GFP_KERNEL, cpu_to_node(cpu));
991 if (!cpu_buffer)
992 return NULL;
993
994 cpu_buffer->cpu = cpu;
995 cpu_buffer->buffer = buffer;
996 spin_lock_init(&cpu_buffer->reader_lock);
997 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
998 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
999
1000 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1001 GFP_KERNEL, cpu_to_node(cpu));
1002 if (!bpage)
1003 goto fail_free_buffer;
1004
1005 rb_check_bpage(cpu_buffer, bpage);
1006
1007 cpu_buffer->reader_page = bpage;
1008 addr = __get_free_page(GFP_KERNEL);
1009 if (!addr)
1010 goto fail_free_reader;
1011 bpage->page = (void *)addr;
1012 rb_init_page(bpage->page);
1013
1014 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1015
1016 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1017 if (ret < 0)
1018 goto fail_free_reader;
1019
1020 cpu_buffer->head_page
1021 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1022 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1023
1024 rb_head_page_activate(cpu_buffer);
1025
1026 return cpu_buffer;
1027
1028 fail_free_reader:
1029 free_buffer_page(cpu_buffer->reader_page);
1030
1031 fail_free_buffer:
1032 kfree(cpu_buffer);
1033 return NULL;
1034}
1035
1036static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1037{
1038 struct list_head *head = cpu_buffer->pages;
1039 struct buffer_page *bpage, *tmp;
1040
1041 free_buffer_page(cpu_buffer->reader_page);
1042
1043 rb_head_page_deactivate(cpu_buffer);
1044
1045 if (head) {
1046 list_for_each_entry_safe(bpage, tmp, head, list) {
1047 list_del_init(&bpage->list);
1048 free_buffer_page(bpage);
1049 }
1050 bpage = list_entry(head, struct buffer_page, list);
1051 free_buffer_page(bpage);
1052 }
1053
1054 kfree(cpu_buffer);
1055}
1056
1057#ifdef CONFIG_HOTPLUG_CPU
1058static int rb_cpu_notify(struct notifier_block *self,
1059 unsigned long action, void *hcpu);
1060#endif
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1073 struct lock_class_key *key)
1074{
1075 struct ring_buffer *buffer;
1076 int bsize;
1077 int cpu;
1078
1079
1080 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1081 GFP_KERNEL);
1082 if (!buffer)
1083 return NULL;
1084
1085 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1086 goto fail_free_buffer;
1087
1088 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1089 buffer->flags = flags;
1090 buffer->clock = trace_clock_local;
1091 buffer->reader_lock_key = key;
1092
1093
1094 if (buffer->pages < 2)
1095 buffer->pages = 2;
1096
1097
1098
1099
1100
1101
1102#ifdef CONFIG_HOTPLUG_CPU
1103 get_online_cpus();
1104 cpumask_copy(buffer->cpumask, cpu_online_mask);
1105#else
1106 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1107#endif
1108 buffer->cpus = nr_cpu_ids;
1109
1110 bsize = sizeof(void *) * nr_cpu_ids;
1111 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1112 GFP_KERNEL);
1113 if (!buffer->buffers)
1114 goto fail_free_cpumask;
1115
1116 for_each_buffer_cpu(buffer, cpu) {
1117 buffer->buffers[cpu] =
1118 rb_allocate_cpu_buffer(buffer, cpu);
1119 if (!buffer->buffers[cpu])
1120 goto fail_free_buffers;
1121 }
1122
1123#ifdef CONFIG_HOTPLUG_CPU
1124 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1125 buffer->cpu_notify.priority = 0;
1126 register_cpu_notifier(&buffer->cpu_notify);
1127#endif
1128
1129 put_online_cpus();
1130 mutex_init(&buffer->mutex);
1131
1132 return buffer;
1133
1134 fail_free_buffers:
1135 for_each_buffer_cpu(buffer, cpu) {
1136 if (buffer->buffers[cpu])
1137 rb_free_cpu_buffer(buffer->buffers[cpu]);
1138 }
1139 kfree(buffer->buffers);
1140
1141 fail_free_cpumask:
1142 free_cpumask_var(buffer->cpumask);
1143 put_online_cpus();
1144
1145 fail_free_buffer:
1146 kfree(buffer);
1147 return NULL;
1148}
1149EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1150
1151
1152
1153
1154
1155void
1156ring_buffer_free(struct ring_buffer *buffer)
1157{
1158 int cpu;
1159
1160 get_online_cpus();
1161
1162#ifdef CONFIG_HOTPLUG_CPU
1163 unregister_cpu_notifier(&buffer->cpu_notify);
1164#endif
1165
1166 for_each_buffer_cpu(buffer, cpu)
1167 rb_free_cpu_buffer(buffer->buffers[cpu]);
1168
1169 put_online_cpus();
1170
1171 kfree(buffer->buffers);
1172 free_cpumask_var(buffer->cpumask);
1173
1174 kfree(buffer);
1175}
1176EXPORT_SYMBOL_GPL(ring_buffer_free);
1177
1178void ring_buffer_set_clock(struct ring_buffer *buffer,
1179 u64 (*clock)(void))
1180{
1181 buffer->clock = clock;
1182}
1183
1184static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1185
1186static void
1187rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1188{
1189 struct buffer_page *bpage;
1190 struct list_head *p;
1191 unsigned i;
1192
1193 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched();
1195
1196 spin_lock_irq(&cpu_buffer->reader_lock);
1197 rb_head_page_deactivate(cpu_buffer);
1198
1199 for (i = 0; i < nr_pages; i++) {
1200 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1201 return;
1202 p = cpu_buffer->pages->next;
1203 bpage = list_entry(p, struct buffer_page, list);
1204 list_del_init(&bpage->list);
1205 free_buffer_page(bpage);
1206 }
1207 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1208 return;
1209
1210 rb_reset_cpu(cpu_buffer);
1211 spin_unlock_irq(&cpu_buffer->reader_lock);
1212
1213 rb_check_pages(cpu_buffer);
1214
1215 atomic_dec(&cpu_buffer->record_disabled);
1216
1217}
1218
1219static void
1220rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1221 struct list_head *pages, unsigned nr_pages)
1222{
1223 struct buffer_page *bpage;
1224 struct list_head *p;
1225 unsigned i;
1226
1227 atomic_inc(&cpu_buffer->record_disabled);
1228 synchronize_sched();
1229
1230 spin_lock_irq(&cpu_buffer->reader_lock);
1231 rb_head_page_deactivate(cpu_buffer);
1232
1233 for (i = 0; i < nr_pages; i++) {
1234 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1235 return;
1236 p = pages->next;
1237 bpage = list_entry(p, struct buffer_page, list);
1238 list_del_init(&bpage->list);
1239 list_add_tail(&bpage->list, cpu_buffer->pages);
1240 }
1241 rb_reset_cpu(cpu_buffer);
1242 spin_unlock_irq(&cpu_buffer->reader_lock);
1243
1244 rb_check_pages(cpu_buffer);
1245
1246 atomic_dec(&cpu_buffer->record_disabled);
1247}
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1264{
1265 struct ring_buffer_per_cpu *cpu_buffer;
1266 unsigned nr_pages, rm_pages, new_pages;
1267 struct buffer_page *bpage, *tmp;
1268 unsigned long buffer_size;
1269 unsigned long addr;
1270 LIST_HEAD(pages);
1271 int i, cpu;
1272
1273
1274
1275
1276 if (!buffer)
1277 return size;
1278
1279 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1280 size *= BUF_PAGE_SIZE;
1281 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1282
1283
1284 if (size < BUF_PAGE_SIZE * 2)
1285 size = BUF_PAGE_SIZE * 2;
1286
1287 if (size == buffer_size)
1288 return size;
1289
1290 mutex_lock(&buffer->mutex);
1291 get_online_cpus();
1292
1293 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1294
1295 if (size < buffer_size) {
1296
1297
1298 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1299 goto out_fail;
1300
1301 rm_pages = buffer->pages - nr_pages;
1302
1303 for_each_buffer_cpu(buffer, cpu) {
1304 cpu_buffer = buffer->buffers[cpu];
1305 rb_remove_pages(cpu_buffer, rm_pages);
1306 }
1307 goto out;
1308 }
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1319 goto out_fail;
1320
1321 new_pages = nr_pages - buffer->pages;
1322
1323 for_each_buffer_cpu(buffer, cpu) {
1324 for (i = 0; i < new_pages; i++) {
1325 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
1326 cache_line_size()),
1327 GFP_KERNEL, cpu_to_node(cpu));
1328 if (!bpage)
1329 goto free_pages;
1330 list_add(&bpage->list, &pages);
1331 addr = __get_free_page(GFP_KERNEL);
1332 if (!addr)
1333 goto free_pages;
1334 bpage->page = (void *)addr;
1335 rb_init_page(bpage->page);
1336 }
1337 }
1338
1339 for_each_buffer_cpu(buffer, cpu) {
1340 cpu_buffer = buffer->buffers[cpu];
1341 rb_insert_pages(cpu_buffer, &pages, new_pages);
1342 }
1343
1344 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1345 goto out_fail;
1346
1347 out:
1348 buffer->pages = nr_pages;
1349 put_online_cpus();
1350 mutex_unlock(&buffer->mutex);
1351
1352 return size;
1353
1354 free_pages:
1355 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1356 list_del_init(&bpage->list);
1357 free_buffer_page(bpage);
1358 }
1359 put_online_cpus();
1360 mutex_unlock(&buffer->mutex);
1361 return -ENOMEM;
1362
1363
1364
1365
1366
1367 out_fail:
1368 put_online_cpus();
1369 mutex_unlock(&buffer->mutex);
1370 return -1;
1371}
1372EXPORT_SYMBOL_GPL(ring_buffer_resize);
1373
1374static inline void *
1375__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1376{
1377 return bpage->data + index;
1378}
1379
1380static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1381{
1382 return bpage->page->data + index;
1383}
1384
1385static inline struct ring_buffer_event *
1386rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1387{
1388 return __rb_page_index(cpu_buffer->reader_page,
1389 cpu_buffer->reader_page->read);
1390}
1391
1392static inline struct ring_buffer_event *
1393rb_iter_head_event(struct ring_buffer_iter *iter)
1394{
1395 return __rb_page_index(iter->head_page, iter->head);
1396}
1397
1398static inline unsigned long rb_page_write(struct buffer_page *bpage)
1399{
1400 return local_read(&bpage->write) & RB_WRITE_MASK;
1401}
1402
1403static inline unsigned rb_page_commit(struct buffer_page *bpage)
1404{
1405 return local_read(&bpage->page->commit);
1406}
1407
1408static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1409{
1410 return local_read(&bpage->entries) & RB_WRITE_MASK;
1411}
1412
1413
1414static inline unsigned rb_page_size(struct buffer_page *bpage)
1415{
1416 return rb_page_commit(bpage);
1417}
1418
1419static inline unsigned
1420rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1421{
1422 return rb_page_commit(cpu_buffer->commit_page);
1423}
1424
1425static inline unsigned
1426rb_event_index(struct ring_buffer_event *event)
1427{
1428 unsigned long addr = (unsigned long)event;
1429
1430 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1431}
1432
1433static inline int
1434rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1435 struct ring_buffer_event *event)
1436{
1437 unsigned long addr = (unsigned long)event;
1438 unsigned long index;
1439
1440 index = rb_event_index(event);
1441 addr &= PAGE_MASK;
1442
1443 return cpu_buffer->commit_page->page == (void *)addr &&
1444 rb_commit_index(cpu_buffer) == index;
1445}
1446
1447static void
1448rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1449{
1450 unsigned long max_count;
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460 again:
1461 max_count = cpu_buffer->buffer->pages * 100;
1462
1463 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1464 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1465 return;
1466 if (RB_WARN_ON(cpu_buffer,
1467 rb_is_reader_page(cpu_buffer->tail_page)))
1468 return;
1469 local_set(&cpu_buffer->commit_page->page->commit,
1470 rb_page_write(cpu_buffer->commit_page));
1471 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1472 cpu_buffer->write_stamp =
1473 cpu_buffer->commit_page->page->time_stamp;
1474
1475 barrier();
1476 }
1477 while (rb_commit_index(cpu_buffer) !=
1478 rb_page_write(cpu_buffer->commit_page)) {
1479
1480 local_set(&cpu_buffer->commit_page->page->commit,
1481 rb_page_write(cpu_buffer->commit_page));
1482 RB_WARN_ON(cpu_buffer,
1483 local_read(&cpu_buffer->commit_page->page->commit) &
1484 ~RB_WRITE_MASK);
1485 barrier();
1486 }
1487
1488
1489 barrier();
1490
1491
1492
1493
1494
1495
1496 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1497 goto again;
1498}
1499
1500static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1501{
1502 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1503 cpu_buffer->reader_page->read = 0;
1504}
1505
1506static void rb_inc_iter(struct ring_buffer_iter *iter)
1507{
1508 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1509
1510
1511
1512
1513
1514
1515
1516 if (iter->head_page == cpu_buffer->reader_page)
1517 iter->head_page = rb_set_head_page(cpu_buffer);
1518 else
1519 rb_inc_page(cpu_buffer, &iter->head_page);
1520
1521 iter->read_stamp = iter->head_page->page->time_stamp;
1522 iter->head = 0;
1523}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536static void
1537rb_update_event(struct ring_buffer_event *event,
1538 unsigned type, unsigned length)
1539{
1540 event->type_len = type;
1541
1542 switch (type) {
1543
1544 case RINGBUF_TYPE_PADDING:
1545 case RINGBUF_TYPE_TIME_EXTEND:
1546 case RINGBUF_TYPE_TIME_STAMP:
1547 break;
1548
1549 case 0:
1550 length -= RB_EVNT_HDR_SIZE;
1551 if (length > RB_MAX_SMALL_DATA)
1552 event->array[0] = length;
1553 else
1554 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1555 break;
1556 default:
1557 BUG();
1558 }
1559}
1560
1561
1562
1563
1564
1565
1566
1567
1568static int
1569rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1570 struct buffer_page *tail_page,
1571 struct buffer_page *next_page)
1572{
1573 struct buffer_page *new_head;
1574 int entries;
1575 int type;
1576 int ret;
1577
1578 entries = rb_page_entries(next_page);
1579
1580
1581
1582
1583
1584
1585 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1586 RB_PAGE_HEAD);
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599 switch (type) {
1600 case RB_PAGE_HEAD:
1601
1602
1603
1604
1605
1606 local_add(entries, &cpu_buffer->overrun);
1607
1608
1609
1610
1611
1612
1613
1614 break;
1615
1616 case RB_PAGE_UPDATE:
1617
1618
1619
1620
1621 break;
1622 case RB_PAGE_NORMAL:
1623
1624
1625
1626
1627
1628 return 1;
1629 case RB_PAGE_MOVED:
1630
1631
1632
1633
1634
1635 return 1;
1636 default:
1637 RB_WARN_ON(cpu_buffer, 1);
1638 return -1;
1639 }
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655 new_head = next_page;
1656 rb_inc_page(cpu_buffer, &new_head);
1657
1658 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1659 RB_PAGE_NORMAL);
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669 switch (ret) {
1670 case RB_PAGE_HEAD:
1671 case RB_PAGE_NORMAL:
1672
1673 break;
1674 default:
1675 RB_WARN_ON(cpu_buffer, 1);
1676 return -1;
1677 }
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689 if (ret == RB_PAGE_NORMAL) {
1690
1691
1692
1693
1694 if (cpu_buffer->tail_page != tail_page &&
1695 cpu_buffer->tail_page != next_page)
1696 rb_head_page_set_normal(cpu_buffer, new_head,
1697 next_page,
1698 RB_PAGE_HEAD);
1699 }
1700
1701
1702
1703
1704
1705
1706 if (type == RB_PAGE_HEAD) {
1707 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1708 tail_page,
1709 RB_PAGE_UPDATE);
1710 if (RB_WARN_ON(cpu_buffer,
1711 ret != RB_PAGE_UPDATE))
1712 return -1;
1713 }
1714
1715 return 0;
1716}
1717
1718static unsigned rb_calculate_event_length(unsigned length)
1719{
1720 struct ring_buffer_event event;
1721
1722
1723 if (!length)
1724 length = 1;
1725
1726 if (length > RB_MAX_SMALL_DATA)
1727 length += sizeof(event.array[0]);
1728
1729 length += RB_EVNT_HDR_SIZE;
1730 length = ALIGN(length, RB_ALIGNMENT);
1731
1732 return length;
1733}
1734
1735static inline void
1736rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1737 struct buffer_page *tail_page,
1738 unsigned long tail, unsigned long length)
1739{
1740 struct ring_buffer_event *event;
1741
1742
1743
1744
1745
1746 if (tail >= BUF_PAGE_SIZE) {
1747 local_sub(length, &tail_page->write);
1748 return;
1749 }
1750
1751 event = __rb_page_index(tail_page, tail);
1752 kmemcheck_annotate_bitfield(event, bitfield);
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1766
1767
1768
1769 rb_event_set_padding(event);
1770
1771
1772 local_sub(length, &tail_page->write);
1773 return;
1774 }
1775
1776
1777 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1778 event->type_len = RINGBUF_TYPE_PADDING;
1779
1780 event->time_delta = 1;
1781
1782
1783 length = (tail + length) - BUF_PAGE_SIZE;
1784 local_sub(length, &tail_page->write);
1785}
1786
1787static struct ring_buffer_event *
1788rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1789 unsigned long length, unsigned long tail,
1790 struct buffer_page *commit_page,
1791 struct buffer_page *tail_page, u64 *ts)
1792{
1793 struct ring_buffer *buffer = cpu_buffer->buffer;
1794 struct buffer_page *next_page;
1795 int ret;
1796
1797 next_page = tail_page;
1798
1799 rb_inc_page(cpu_buffer, &next_page);
1800
1801
1802
1803
1804
1805
1806 if (unlikely(next_page == commit_page)) {
1807 local_inc(&cpu_buffer->commit_overrun);
1808 goto out_reset;
1809 }
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1826
1827
1828
1829
1830
1831 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1832
1833
1834
1835
1836 if (!(buffer->flags & RB_FL_OVERWRITE))
1837 goto out_reset;
1838
1839 ret = rb_handle_head_page(cpu_buffer,
1840 tail_page,
1841 next_page);
1842 if (ret < 0)
1843 goto out_reset;
1844 if (ret)
1845 goto out_again;
1846 } else {
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857 if (unlikely((cpu_buffer->commit_page !=
1858 cpu_buffer->tail_page) &&
1859 (cpu_buffer->commit_page ==
1860 cpu_buffer->reader_page))) {
1861 local_inc(&cpu_buffer->commit_overrun);
1862 goto out_reset;
1863 }
1864 }
1865 }
1866
1867 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1868 if (ret) {
1869
1870
1871
1872
1873 *ts = rb_time_stamp(buffer);
1874 next_page->page->time_stamp = *ts;
1875 }
1876
1877 out_again:
1878
1879 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1880
1881
1882 return ERR_PTR(-EAGAIN);
1883
1884 out_reset:
1885
1886 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1887
1888 return NULL;
1889}
1890
1891static struct ring_buffer_event *
1892__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1893 unsigned type, unsigned long length, u64 *ts)
1894{
1895 struct buffer_page *tail_page, *commit_page;
1896 struct ring_buffer_event *event;
1897 unsigned long tail, write;
1898
1899 commit_page = cpu_buffer->commit_page;
1900
1901 barrier();
1902 tail_page = cpu_buffer->tail_page;
1903 write = local_add_return(length, &tail_page->write);
1904
1905
1906 write &= RB_WRITE_MASK;
1907 tail = write - length;
1908
1909
1910 if (write > BUF_PAGE_SIZE)
1911 return rb_move_tail(cpu_buffer, length, tail,
1912 commit_page, tail_page, ts);
1913
1914
1915
1916 event = __rb_page_index(tail_page, tail);
1917 kmemcheck_annotate_bitfield(event, bitfield);
1918 rb_update_event(event, type, length);
1919
1920
1921 if (likely(!type))
1922 local_inc(&tail_page->entries);
1923
1924
1925
1926
1927
1928 if (!tail)
1929 tail_page->page->time_stamp = *ts;
1930
1931 return event;
1932}
1933
1934static inline int
1935rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1936 struct ring_buffer_event *event)
1937{
1938 unsigned long new_index, old_index;
1939 struct buffer_page *bpage;
1940 unsigned long index;
1941 unsigned long addr;
1942
1943 new_index = rb_event_index(event);
1944 old_index = new_index + rb_event_length(event);
1945 addr = (unsigned long)event;
1946 addr &= PAGE_MASK;
1947
1948 bpage = cpu_buffer->tail_page;
1949
1950 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1951 unsigned long write_mask =
1952 local_read(&bpage->write) & ~RB_WRITE_MASK;
1953
1954
1955
1956
1957
1958
1959 old_index += write_mask;
1960 new_index += write_mask;
1961 index = local_cmpxchg(&bpage->write, old_index, new_index);
1962 if (index == old_index)
1963 return 1;
1964 }
1965
1966
1967 return 0;
1968}
1969
1970static int
1971rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1972 u64 *ts, u64 *delta)
1973{
1974 struct ring_buffer_event *event;
1975 static int once;
1976 int ret;
1977
1978 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1979 printk(KERN_WARNING "Delta way too big! %llu"
1980 " ts=%llu write stamp = %llu\n",
1981 (unsigned long long)*delta,
1982 (unsigned long long)*ts,
1983 (unsigned long long)cpu_buffer->write_stamp);
1984 WARN_ON(1);
1985 }
1986
1987
1988
1989
1990
1991 event = __rb_reserve_next(cpu_buffer,
1992 RINGBUF_TYPE_TIME_EXTEND,
1993 RB_LEN_TIME_EXTEND,
1994 ts);
1995 if (!event)
1996 return -EBUSY;
1997
1998 if (PTR_ERR(event) == -EAGAIN)
1999 return -EAGAIN;
2000
2001
2002 if (rb_event_is_commit(cpu_buffer, event)) {
2003
2004
2005
2006
2007
2008 if (rb_event_index(event)) {
2009 event->time_delta = *delta & TS_MASK;
2010 event->array[0] = *delta >> TS_SHIFT;
2011 } else {
2012
2013 if (!rb_try_to_discard(cpu_buffer, event)) {
2014
2015 event->time_delta = 0;
2016 event->array[0] = 0;
2017 }
2018 }
2019 cpu_buffer->write_stamp = *ts;
2020
2021 ret = 1;
2022 } else {
2023
2024 if (!rb_try_to_discard(cpu_buffer, event)) {
2025
2026 event->time_delta = 0;
2027 event->array[0] = 0;
2028 }
2029 ret = 0;
2030 }
2031
2032 *delta = 0;
2033
2034 return ret;
2035}
2036
2037static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2038{
2039 local_inc(&cpu_buffer->committing);
2040 local_inc(&cpu_buffer->commits);
2041}
2042
2043static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2044{
2045 unsigned long commits;
2046
2047 if (RB_WARN_ON(cpu_buffer,
2048 !local_read(&cpu_buffer->committing)))
2049 return;
2050
2051 again:
2052 commits = local_read(&cpu_buffer->commits);
2053
2054 barrier();
2055 if (local_read(&cpu_buffer->committing) == 1)
2056 rb_set_commit_to_write(cpu_buffer);
2057
2058 local_dec(&cpu_buffer->committing);
2059
2060
2061 barrier();
2062
2063
2064
2065
2066
2067
2068 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2069 !local_read(&cpu_buffer->committing)) {
2070 local_inc(&cpu_buffer->committing);
2071 goto again;
2072 }
2073}
2074
2075static struct ring_buffer_event *
2076rb_reserve_next_event(struct ring_buffer *buffer,
2077 struct ring_buffer_per_cpu *cpu_buffer,
2078 unsigned long length)
2079{
2080 struct ring_buffer_event *event;
2081 u64 ts, delta = 0;
2082 int commit = 0;
2083 int nr_loops = 0;
2084
2085 rb_start_commit(cpu_buffer);
2086
2087#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2088
2089
2090
2091
2092
2093
2094 barrier();
2095 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2096 local_dec(&cpu_buffer->committing);
2097 local_dec(&cpu_buffer->commits);
2098 return NULL;
2099 }
2100#endif
2101
2102 length = rb_calculate_event_length(length);
2103 again:
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2114 goto out_fail;
2115
2116 ts = rb_time_stamp(cpu_buffer->buffer);
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
2127 rb_page_write(cpu_buffer->tail_page) ==
2128 rb_commit_index(cpu_buffer))) {
2129 u64 diff;
2130
2131 diff = ts - cpu_buffer->write_stamp;
2132
2133
2134 barrier();
2135
2136
2137 if (unlikely(ts < cpu_buffer->write_stamp))
2138 goto get_event;
2139
2140 delta = diff;
2141 if (unlikely(test_time_stamp(delta))) {
2142
2143 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
2144 if (commit == -EBUSY)
2145 goto out_fail;
2146
2147 if (commit == -EAGAIN)
2148 goto again;
2149
2150 RB_WARN_ON(cpu_buffer, commit < 0);
2151 }
2152 }
2153
2154 get_event:
2155 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
2156 if (unlikely(PTR_ERR(event) == -EAGAIN))
2157 goto again;
2158
2159 if (!event)
2160 goto out_fail;
2161
2162 if (!rb_event_is_commit(cpu_buffer, event))
2163 delta = 0;
2164
2165 event->time_delta = delta;
2166
2167 return event;
2168
2169 out_fail:
2170 rb_end_commit(cpu_buffer);
2171 return NULL;
2172}
2173
2174#ifdef CONFIG_TRACING
2175
2176#define TRACE_RECURSIVE_DEPTH 16
2177
2178static int trace_recursive_lock(void)
2179{
2180 current->trace_recursion++;
2181
2182 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
2183 return 0;
2184
2185
2186 tracing_off_permanent();
2187
2188 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2189 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2190 current->trace_recursion,
2191 hardirq_count() >> HARDIRQ_SHIFT,
2192 softirq_count() >> SOFTIRQ_SHIFT,
2193 in_nmi());
2194
2195 WARN_ON_ONCE(1);
2196 return -1;
2197}
2198
2199static void trace_recursive_unlock(void)
2200{
2201 WARN_ON_ONCE(!current->trace_recursion);
2202
2203 current->trace_recursion--;
2204}
2205
2206#else
2207
2208#define trace_recursive_lock() (0)
2209#define trace_recursive_unlock() do { } while (0)
2210
2211#endif
2212
2213static DEFINE_PER_CPU(int, rb_need_resched);
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230struct ring_buffer_event *
2231ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2232{
2233 struct ring_buffer_per_cpu *cpu_buffer;
2234 struct ring_buffer_event *event;
2235 int cpu, resched;
2236
2237 if (ring_buffer_flags != RB_BUFFERS_ON)
2238 return NULL;
2239
2240 if (atomic_read(&buffer->record_disabled))
2241 return NULL;
2242
2243
2244 resched = ftrace_preempt_disable();
2245
2246 if (trace_recursive_lock())
2247 goto out_nocheck;
2248
2249 cpu = raw_smp_processor_id();
2250
2251 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2252 goto out;
2253
2254 cpu_buffer = buffer->buffers[cpu];
2255
2256 if (atomic_read(&cpu_buffer->record_disabled))
2257 goto out;
2258
2259 if (length > BUF_MAX_DATA_SIZE)
2260 goto out;
2261
2262 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2263 if (!event)
2264 goto out;
2265
2266
2267
2268
2269
2270
2271 if (preempt_count() == 1)
2272 per_cpu(rb_need_resched, cpu) = resched;
2273
2274 return event;
2275
2276 out:
2277 trace_recursive_unlock();
2278
2279 out_nocheck:
2280 ftrace_preempt_enable(resched);
2281 return NULL;
2282}
2283EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2284
2285static void
2286rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2287 struct ring_buffer_event *event)
2288{
2289
2290
2291
2292
2293 if (rb_event_is_commit(cpu_buffer, event))
2294 cpu_buffer->write_stamp += event->time_delta;
2295}
2296
2297static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2298 struct ring_buffer_event *event)
2299{
2300 local_inc(&cpu_buffer->entries);
2301 rb_update_write_stamp(cpu_buffer, event);
2302 rb_end_commit(cpu_buffer);
2303}
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2315 struct ring_buffer_event *event)
2316{
2317 struct ring_buffer_per_cpu *cpu_buffer;
2318 int cpu = raw_smp_processor_id();
2319
2320 cpu_buffer = buffer->buffers[cpu];
2321
2322 rb_commit(cpu_buffer, event);
2323
2324 trace_recursive_unlock();
2325
2326
2327
2328
2329 if (preempt_count() == 1)
2330 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2331 else
2332 preempt_enable_no_resched_notrace();
2333
2334 return 0;
2335}
2336EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2337
2338static inline void rb_event_discard(struct ring_buffer_event *event)
2339{
2340
2341 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2342 event->type_len = RINGBUF_TYPE_PADDING;
2343
2344 if (!event->time_delta)
2345 event->time_delta = 1;
2346}
2347
2348
2349
2350
2351
2352
2353
2354static inline void
2355rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2356 struct ring_buffer_event *event)
2357{
2358 unsigned long addr = (unsigned long)event;
2359 struct buffer_page *bpage = cpu_buffer->commit_page;
2360 struct buffer_page *start;
2361
2362 addr &= PAGE_MASK;
2363
2364
2365 if (likely(bpage->page == (void *)addr)) {
2366 local_dec(&bpage->entries);
2367 return;
2368 }
2369
2370
2371
2372
2373
2374 rb_inc_page(cpu_buffer, &bpage);
2375 start = bpage;
2376 do {
2377 if (bpage->page == (void *)addr) {
2378 local_dec(&bpage->entries);
2379 return;
2380 }
2381 rb_inc_page(cpu_buffer, &bpage);
2382 } while (bpage != start);
2383
2384
2385 RB_WARN_ON(cpu_buffer, 1);
2386}
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407void ring_buffer_discard_commit(struct ring_buffer *buffer,
2408 struct ring_buffer_event *event)
2409{
2410 struct ring_buffer_per_cpu *cpu_buffer;
2411 int cpu;
2412
2413
2414 rb_event_discard(event);
2415
2416 cpu = smp_processor_id();
2417 cpu_buffer = buffer->buffers[cpu];
2418
2419
2420
2421
2422
2423
2424 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2425
2426 rb_decrement_entry(cpu_buffer, event);
2427 if (rb_try_to_discard(cpu_buffer, event))
2428 goto out;
2429
2430
2431
2432
2433
2434 rb_update_write_stamp(cpu_buffer, event);
2435 out:
2436 rb_end_commit(cpu_buffer);
2437
2438 trace_recursive_unlock();
2439
2440
2441
2442
2443 if (preempt_count() == 1)
2444 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2445 else
2446 preempt_enable_no_resched_notrace();
2447
2448}
2449EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464int ring_buffer_write(struct ring_buffer *buffer,
2465 unsigned long length,
2466 void *data)
2467{
2468 struct ring_buffer_per_cpu *cpu_buffer;
2469 struct ring_buffer_event *event;
2470 void *body;
2471 int ret = -EBUSY;
2472 int cpu, resched;
2473
2474 if (ring_buffer_flags != RB_BUFFERS_ON)
2475 return -EBUSY;
2476
2477 if (atomic_read(&buffer->record_disabled))
2478 return -EBUSY;
2479
2480 resched = ftrace_preempt_disable();
2481
2482 cpu = raw_smp_processor_id();
2483
2484 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2485 goto out;
2486
2487 cpu_buffer = buffer->buffers[cpu];
2488
2489 if (atomic_read(&cpu_buffer->record_disabled))
2490 goto out;
2491
2492 if (length > BUF_MAX_DATA_SIZE)
2493 goto out;
2494
2495 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2496 if (!event)
2497 goto out;
2498
2499 body = rb_event_data(event);
2500
2501 memcpy(body, data, length);
2502
2503 rb_commit(cpu_buffer, event);
2504
2505 ret = 0;
2506 out:
2507 ftrace_preempt_enable(resched);
2508
2509 return ret;
2510}
2511EXPORT_SYMBOL_GPL(ring_buffer_write);
2512
2513static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2514{
2515 struct buffer_page *reader = cpu_buffer->reader_page;
2516 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2517 struct buffer_page *commit = cpu_buffer->commit_page;
2518
2519
2520 if (unlikely(!head))
2521 return 1;
2522
2523 return reader->read == rb_page_commit(reader) &&
2524 (commit == reader ||
2525 (commit == head &&
2526 head->read == rb_page_commit(commit)));
2527}
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538void ring_buffer_record_disable(struct ring_buffer *buffer)
2539{
2540 atomic_inc(&buffer->record_disabled);
2541}
2542EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2543
2544
2545
2546
2547
2548
2549
2550
2551void ring_buffer_record_enable(struct ring_buffer *buffer)
2552{
2553 atomic_dec(&buffer->record_disabled);
2554}
2555EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2568{
2569 struct ring_buffer_per_cpu *cpu_buffer;
2570
2571 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2572 return;
2573
2574 cpu_buffer = buffer->buffers[cpu];
2575 atomic_inc(&cpu_buffer->record_disabled);
2576}
2577EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2588{
2589 struct ring_buffer_per_cpu *cpu_buffer;
2590
2591 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2592 return;
2593
2594 cpu_buffer = buffer->buffers[cpu];
2595 atomic_dec(&cpu_buffer->record_disabled);
2596}
2597EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2598
2599
2600
2601
2602
2603
2604unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2605{
2606 struct ring_buffer_per_cpu *cpu_buffer;
2607 unsigned long ret;
2608
2609 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2610 return 0;
2611
2612 cpu_buffer = buffer->buffers[cpu];
2613 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
2614 - cpu_buffer->read;
2615
2616 return ret;
2617}
2618EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2619
2620
2621
2622
2623
2624
2625unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2626{
2627 struct ring_buffer_per_cpu *cpu_buffer;
2628 unsigned long ret;
2629
2630 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2631 return 0;
2632
2633 cpu_buffer = buffer->buffers[cpu];
2634 ret = local_read(&cpu_buffer->overrun);
2635
2636 return ret;
2637}
2638EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2639
2640
2641
2642
2643
2644
2645unsigned long
2646ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2647{
2648 struct ring_buffer_per_cpu *cpu_buffer;
2649 unsigned long ret;
2650
2651 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2652 return 0;
2653
2654 cpu_buffer = buffer->buffers[cpu];
2655 ret = local_read(&cpu_buffer->commit_overrun);
2656
2657 return ret;
2658}
2659EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2660
2661
2662
2663
2664
2665
2666
2667
2668unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2669{
2670 struct ring_buffer_per_cpu *cpu_buffer;
2671 unsigned long entries = 0;
2672 int cpu;
2673
2674
2675 for_each_buffer_cpu(buffer, cpu) {
2676 cpu_buffer = buffer->buffers[cpu];
2677 entries += (local_read(&cpu_buffer->entries) -
2678 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
2679 }
2680
2681 return entries;
2682}
2683EXPORT_SYMBOL_GPL(ring_buffer_entries);
2684
2685
2686
2687
2688
2689
2690
2691
2692unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2693{
2694 struct ring_buffer_per_cpu *cpu_buffer;
2695 unsigned long overruns = 0;
2696 int cpu;
2697
2698
2699 for_each_buffer_cpu(buffer, cpu) {
2700 cpu_buffer = buffer->buffers[cpu];
2701 overruns += local_read(&cpu_buffer->overrun);
2702 }
2703
2704 return overruns;
2705}
2706EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2707
2708static void rb_iter_reset(struct ring_buffer_iter *iter)
2709{
2710 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2711
2712
2713 if (list_empty(&cpu_buffer->reader_page->list)) {
2714 iter->head_page = rb_set_head_page(cpu_buffer);
2715 if (unlikely(!iter->head_page))
2716 return;
2717 iter->head = iter->head_page->read;
2718 } else {
2719 iter->head_page = cpu_buffer->reader_page;
2720 iter->head = cpu_buffer->reader_page->read;
2721 }
2722 if (iter->head)
2723 iter->read_stamp = cpu_buffer->read_stamp;
2724 else
2725 iter->read_stamp = iter->head_page->page->time_stamp;
2726}
2727
2728
2729
2730
2731
2732
2733
2734
2735void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2736{
2737 struct ring_buffer_per_cpu *cpu_buffer;
2738 unsigned long flags;
2739
2740 if (!iter)
2741 return;
2742
2743 cpu_buffer = iter->cpu_buffer;
2744
2745 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2746 rb_iter_reset(iter);
2747 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2748}
2749EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2750
2751
2752
2753
2754
2755int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2756{
2757 struct ring_buffer_per_cpu *cpu_buffer;
2758
2759 cpu_buffer = iter->cpu_buffer;
2760
2761 return iter->head_page == cpu_buffer->commit_page &&
2762 iter->head == rb_commit_index(cpu_buffer);
2763}
2764EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2765
2766static void
2767rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2768 struct ring_buffer_event *event)
2769{
2770 u64 delta;
2771
2772 switch (event->type_len) {
2773 case RINGBUF_TYPE_PADDING:
2774 return;
2775
2776 case RINGBUF_TYPE_TIME_EXTEND:
2777 delta = event->array[0];
2778 delta <<= TS_SHIFT;
2779 delta += event->time_delta;
2780 cpu_buffer->read_stamp += delta;
2781 return;
2782
2783 case RINGBUF_TYPE_TIME_STAMP:
2784
2785 return;
2786
2787 case RINGBUF_TYPE_DATA:
2788 cpu_buffer->read_stamp += event->time_delta;
2789 return;
2790
2791 default:
2792 BUG();
2793 }
2794 return;
2795}
2796
2797static void
2798rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2799 struct ring_buffer_event *event)
2800{
2801 u64 delta;
2802
2803 switch (event->type_len) {
2804 case RINGBUF_TYPE_PADDING:
2805 return;
2806
2807 case RINGBUF_TYPE_TIME_EXTEND:
2808 delta = event->array[0];
2809 delta <<= TS_SHIFT;
2810 delta += event->time_delta;
2811 iter->read_stamp += delta;
2812 return;
2813
2814 case RINGBUF_TYPE_TIME_STAMP:
2815
2816 return;
2817
2818 case RINGBUF_TYPE_DATA:
2819 iter->read_stamp += event->time_delta;
2820 return;
2821
2822 default:
2823 BUG();
2824 }
2825 return;
2826}
2827
2828static struct buffer_page *
2829rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2830{
2831 struct buffer_page *reader = NULL;
2832 unsigned long flags;
2833 int nr_loops = 0;
2834 int ret;
2835
2836 local_irq_save(flags);
2837 __raw_spin_lock(&cpu_buffer->lock);
2838
2839 again:
2840
2841
2842
2843
2844
2845
2846 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2847 reader = NULL;
2848 goto out;
2849 }
2850
2851 reader = cpu_buffer->reader_page;
2852
2853
2854 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2855 goto out;
2856
2857
2858 if (RB_WARN_ON(cpu_buffer,
2859 cpu_buffer->reader_page->read > rb_page_size(reader)))
2860 goto out;
2861
2862
2863 reader = NULL;
2864 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2865 goto out;
2866
2867
2868
2869
2870 local_set(&cpu_buffer->reader_page->write, 0);
2871 local_set(&cpu_buffer->reader_page->entries, 0);
2872 local_set(&cpu_buffer->reader_page->page->commit, 0);
2873
2874 spin:
2875
2876
2877
2878 reader = rb_set_head_page(cpu_buffer);
2879 cpu_buffer->reader_page->list.next = reader->list.next;
2880 cpu_buffer->reader_page->list.prev = reader->list.prev;
2881
2882
2883
2884
2885
2886
2887 cpu_buffer->pages = reader->list.prev;
2888
2889
2890 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2904
2905
2906
2907
2908 if (!ret)
2909 goto spin;
2910
2911
2912
2913
2914
2915
2916 reader->list.next->prev = &cpu_buffer->reader_page->list;
2917 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2918
2919
2920 cpu_buffer->reader_page = reader;
2921 rb_reset_reader_page(cpu_buffer);
2922
2923 goto again;
2924
2925 out:
2926 __raw_spin_unlock(&cpu_buffer->lock);
2927 local_irq_restore(flags);
2928
2929 return reader;
2930}
2931
2932static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2933{
2934 struct ring_buffer_event *event;
2935 struct buffer_page *reader;
2936 unsigned length;
2937
2938 reader = rb_get_reader_page(cpu_buffer);
2939
2940
2941 if (RB_WARN_ON(cpu_buffer, !reader))
2942 return;
2943
2944 event = rb_reader_event(cpu_buffer);
2945
2946 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
2947 cpu_buffer->read++;
2948
2949 rb_update_read_stamp(cpu_buffer, event);
2950
2951 length = rb_event_length(event);
2952 cpu_buffer->reader_page->read += length;
2953}
2954
2955static void rb_advance_iter(struct ring_buffer_iter *iter)
2956{
2957 struct ring_buffer *buffer;
2958 struct ring_buffer_per_cpu *cpu_buffer;
2959 struct ring_buffer_event *event;
2960 unsigned length;
2961
2962 cpu_buffer = iter->cpu_buffer;
2963 buffer = cpu_buffer->buffer;
2964
2965
2966
2967
2968 if (iter->head >= rb_page_size(iter->head_page)) {
2969
2970 if (iter->head_page == cpu_buffer->commit_page)
2971 return;
2972 rb_inc_iter(iter);
2973 return;
2974 }
2975
2976 event = rb_iter_head_event(iter);
2977
2978 length = rb_event_length(event);
2979
2980
2981
2982
2983
2984 if (RB_WARN_ON(cpu_buffer,
2985 (iter->head_page == cpu_buffer->commit_page) &&
2986 (iter->head + length > rb_commit_index(cpu_buffer))))
2987 return;
2988
2989 rb_update_iter_read_stamp(iter, event);
2990
2991 iter->head += length;
2992
2993
2994 if ((iter->head >= rb_page_size(iter->head_page)) &&
2995 (iter->head_page != cpu_buffer->commit_page))
2996 rb_advance_iter(iter);
2997}
2998
2999static struct ring_buffer_event *
3000rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
3001{
3002 struct ring_buffer_event *event;
3003 struct buffer_page *reader;
3004 int nr_loops = 0;
3005
3006 again:
3007
3008
3009
3010
3011
3012
3013 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3014 return NULL;
3015
3016 reader = rb_get_reader_page(cpu_buffer);
3017 if (!reader)
3018 return NULL;
3019
3020 event = rb_reader_event(cpu_buffer);
3021
3022 switch (event->type_len) {
3023 case RINGBUF_TYPE_PADDING:
3024 if (rb_null_event(event))
3025 RB_WARN_ON(cpu_buffer, 1);
3026
3027
3028
3029
3030
3031
3032
3033
3034 return event;
3035
3036 case RINGBUF_TYPE_TIME_EXTEND:
3037
3038 rb_advance_reader(cpu_buffer);
3039 goto again;
3040
3041 case RINGBUF_TYPE_TIME_STAMP:
3042
3043 rb_advance_reader(cpu_buffer);
3044 goto again;
3045
3046 case RINGBUF_TYPE_DATA:
3047 if (ts) {
3048 *ts = cpu_buffer->read_stamp + event->time_delta;
3049 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3050 cpu_buffer->cpu, ts);
3051 }
3052 return event;
3053
3054 default:
3055 BUG();
3056 }
3057
3058 return NULL;
3059}
3060EXPORT_SYMBOL_GPL(ring_buffer_peek);
3061
3062static struct ring_buffer_event *
3063rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3064{
3065 struct ring_buffer *buffer;
3066 struct ring_buffer_per_cpu *cpu_buffer;
3067 struct ring_buffer_event *event;
3068 int nr_loops = 0;
3069
3070 if (ring_buffer_iter_empty(iter))
3071 return NULL;
3072
3073 cpu_buffer = iter->cpu_buffer;
3074 buffer = cpu_buffer->buffer;
3075
3076 again:
3077
3078
3079
3080
3081
3082
3083
3084
3085 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3086 return NULL;
3087
3088 if (rb_per_cpu_empty(cpu_buffer))
3089 return NULL;
3090
3091 event = rb_iter_head_event(iter);
3092
3093 switch (event->type_len) {
3094 case RINGBUF_TYPE_PADDING:
3095 if (rb_null_event(event)) {
3096 rb_inc_iter(iter);
3097 goto again;
3098 }
3099 rb_advance_iter(iter);
3100 return event;
3101
3102 case RINGBUF_TYPE_TIME_EXTEND:
3103
3104 rb_advance_iter(iter);
3105 goto again;
3106
3107 case RINGBUF_TYPE_TIME_STAMP:
3108
3109 rb_advance_iter(iter);
3110 goto again;
3111
3112 case RINGBUF_TYPE_DATA:
3113 if (ts) {
3114 *ts = iter->read_stamp + event->time_delta;
3115 ring_buffer_normalize_time_stamp(buffer,
3116 cpu_buffer->cpu, ts);
3117 }
3118 return event;
3119
3120 default:
3121 BUG();
3122 }
3123
3124 return NULL;
3125}
3126EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3127
3128static inline int rb_ok_to_lock(void)
3129{
3130
3131
3132
3133
3134
3135
3136 if (likely(!in_nmi()))
3137 return 1;
3138
3139 tracing_off_permanent();
3140 return 0;
3141}
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152struct ring_buffer_event *
3153ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3154{
3155 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3156 struct ring_buffer_event *event;
3157 unsigned long flags;
3158 int dolock;
3159
3160 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3161 return NULL;
3162
3163 dolock = rb_ok_to_lock();
3164 again:
3165 local_irq_save(flags);
3166 if (dolock)
3167 spin_lock(&cpu_buffer->reader_lock);
3168 event = rb_buffer_peek(cpu_buffer, ts);
3169 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3170 rb_advance_reader(cpu_buffer);
3171 if (dolock)
3172 spin_unlock(&cpu_buffer->reader_lock);
3173 local_irq_restore(flags);
3174
3175 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3176 goto again;
3177
3178 return event;
3179}
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189struct ring_buffer_event *
3190ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3191{
3192 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3193 struct ring_buffer_event *event;
3194 unsigned long flags;
3195
3196 again:
3197 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3198 event = rb_iter_peek(iter, ts);
3199 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3200
3201 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3202 goto again;
3203
3204 return event;
3205}
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215struct ring_buffer_event *
3216ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3217{
3218 struct ring_buffer_per_cpu *cpu_buffer;
3219 struct ring_buffer_event *event = NULL;
3220 unsigned long flags;
3221 int dolock;
3222
3223 dolock = rb_ok_to_lock();
3224
3225 again:
3226
3227 preempt_disable();
3228
3229 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3230 goto out;
3231
3232 cpu_buffer = buffer->buffers[cpu];
3233 local_irq_save(flags);
3234 if (dolock)
3235 spin_lock(&cpu_buffer->reader_lock);
3236
3237 event = rb_buffer_peek(cpu_buffer, ts);
3238 if (event)
3239 rb_advance_reader(cpu_buffer);
3240
3241 if (dolock)
3242 spin_unlock(&cpu_buffer->reader_lock);
3243 local_irq_restore(flags);
3244
3245 out:
3246 preempt_enable();
3247
3248 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3249 goto again;
3250
3251 return event;
3252}
3253EXPORT_SYMBOL_GPL(ring_buffer_consume);
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267struct ring_buffer_iter *
3268ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3269{
3270 struct ring_buffer_per_cpu *cpu_buffer;
3271 struct ring_buffer_iter *iter;
3272 unsigned long flags;
3273
3274 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3275 return NULL;
3276
3277 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3278 if (!iter)
3279 return NULL;
3280
3281 cpu_buffer = buffer->buffers[cpu];
3282
3283 iter->cpu_buffer = cpu_buffer;
3284
3285 atomic_inc(&cpu_buffer->record_disabled);
3286 synchronize_sched();
3287
3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3289 __raw_spin_lock(&cpu_buffer->lock);
3290 rb_iter_reset(iter);
3291 __raw_spin_unlock(&cpu_buffer->lock);
3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3293
3294 return iter;
3295}
3296EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3297
3298
3299
3300
3301
3302
3303
3304
3305void
3306ring_buffer_read_finish(struct ring_buffer_iter *iter)
3307{
3308 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3309
3310 atomic_dec(&cpu_buffer->record_disabled);
3311 kfree(iter);
3312}
3313EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3314
3315
3316
3317
3318
3319
3320
3321
3322struct ring_buffer_event *
3323ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3324{
3325 struct ring_buffer_event *event;
3326 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3327 unsigned long flags;
3328
3329 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3330 again:
3331 event = rb_iter_peek(iter, ts);
3332 if (!event)
3333 goto out;
3334
3335 if (event->type_len == RINGBUF_TYPE_PADDING)
3336 goto again;
3337
3338 rb_advance_iter(iter);
3339 out:
3340 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3341
3342 return event;
3343}
3344EXPORT_SYMBOL_GPL(ring_buffer_read);
3345
3346
3347
3348
3349
3350unsigned long ring_buffer_size(struct ring_buffer *buffer)
3351{
3352 return BUF_PAGE_SIZE * buffer->pages;
3353}
3354EXPORT_SYMBOL_GPL(ring_buffer_size);
3355
3356static void
3357rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3358{
3359 rb_head_page_deactivate(cpu_buffer);
3360
3361 cpu_buffer->head_page
3362 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3363 local_set(&cpu_buffer->head_page->write, 0);
3364 local_set(&cpu_buffer->head_page->entries, 0);
3365 local_set(&cpu_buffer->head_page->page->commit, 0);
3366
3367 cpu_buffer->head_page->read = 0;
3368
3369 cpu_buffer->tail_page = cpu_buffer->head_page;
3370 cpu_buffer->commit_page = cpu_buffer->head_page;
3371
3372 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3373 local_set(&cpu_buffer->reader_page->write, 0);
3374 local_set(&cpu_buffer->reader_page->entries, 0);
3375 local_set(&cpu_buffer->reader_page->page->commit, 0);
3376 cpu_buffer->reader_page->read = 0;
3377
3378 local_set(&cpu_buffer->commit_overrun, 0);
3379 local_set(&cpu_buffer->overrun, 0);
3380 local_set(&cpu_buffer->entries, 0);
3381 local_set(&cpu_buffer->committing, 0);
3382 local_set(&cpu_buffer->commits, 0);
3383 cpu_buffer->read = 0;
3384
3385 cpu_buffer->write_stamp = 0;
3386 cpu_buffer->read_stamp = 0;
3387
3388 rb_head_page_activate(cpu_buffer);
3389}
3390
3391
3392
3393
3394
3395
3396void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3397{
3398 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3399 unsigned long flags;
3400
3401 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3402 return;
3403
3404 atomic_inc(&cpu_buffer->record_disabled);
3405
3406 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3407
3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3409 goto out;
3410
3411 __raw_spin_lock(&cpu_buffer->lock);
3412
3413 rb_reset_cpu(cpu_buffer);
3414
3415 __raw_spin_unlock(&cpu_buffer->lock);
3416
3417 out:
3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3419
3420 atomic_dec(&cpu_buffer->record_disabled);
3421}
3422EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3423
3424
3425
3426
3427
3428void ring_buffer_reset(struct ring_buffer *buffer)
3429{
3430 int cpu;
3431
3432 for_each_buffer_cpu(buffer, cpu)
3433 ring_buffer_reset_cpu(buffer, cpu);
3434}
3435EXPORT_SYMBOL_GPL(ring_buffer_reset);
3436
3437
3438
3439
3440
3441int ring_buffer_empty(struct ring_buffer *buffer)
3442{
3443 struct ring_buffer_per_cpu *cpu_buffer;
3444 unsigned long flags;
3445 int dolock;
3446 int cpu;
3447 int ret;
3448
3449 dolock = rb_ok_to_lock();
3450
3451
3452 for_each_buffer_cpu(buffer, cpu) {
3453 cpu_buffer = buffer->buffers[cpu];
3454 local_irq_save(flags);
3455 if (dolock)
3456 spin_lock(&cpu_buffer->reader_lock);
3457 ret = rb_per_cpu_empty(cpu_buffer);
3458 if (dolock)
3459 spin_unlock(&cpu_buffer->reader_lock);
3460 local_irq_restore(flags);
3461
3462 if (!ret)
3463 return 0;
3464 }
3465
3466 return 1;
3467}
3468EXPORT_SYMBOL_GPL(ring_buffer_empty);
3469
3470
3471
3472
3473
3474
3475int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3476{
3477 struct ring_buffer_per_cpu *cpu_buffer;
3478 unsigned long flags;
3479 int dolock;
3480 int ret;
3481
3482 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3483 return 1;
3484
3485 dolock = rb_ok_to_lock();
3486
3487 cpu_buffer = buffer->buffers[cpu];
3488 local_irq_save(flags);
3489 if (dolock)
3490 spin_lock(&cpu_buffer->reader_lock);
3491 ret = rb_per_cpu_empty(cpu_buffer);
3492 if (dolock)
3493 spin_unlock(&cpu_buffer->reader_lock);
3494 local_irq_restore(flags);
3495
3496 return ret;
3497}
3498EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
3499
3500#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3512 struct ring_buffer *buffer_b, int cpu)
3513{
3514 struct ring_buffer_per_cpu *cpu_buffer_a;
3515 struct ring_buffer_per_cpu *cpu_buffer_b;
3516 int ret = -EINVAL;
3517
3518 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3519 !cpumask_test_cpu(cpu, buffer_b->cpumask))
3520 goto out;
3521
3522
3523 if (buffer_a->pages != buffer_b->pages)
3524 goto out;
3525
3526 ret = -EAGAIN;
3527
3528 if (ring_buffer_flags != RB_BUFFERS_ON)
3529 goto out;
3530
3531 if (atomic_read(&buffer_a->record_disabled))
3532 goto out;
3533
3534 if (atomic_read(&buffer_b->record_disabled))
3535 goto out;
3536
3537 cpu_buffer_a = buffer_a->buffers[cpu];
3538 cpu_buffer_b = buffer_b->buffers[cpu];
3539
3540 if (atomic_read(&cpu_buffer_a->record_disabled))
3541 goto out;
3542
3543 if (atomic_read(&cpu_buffer_b->record_disabled))
3544 goto out;
3545
3546
3547
3548
3549
3550
3551
3552 atomic_inc(&cpu_buffer_a->record_disabled);
3553 atomic_inc(&cpu_buffer_b->record_disabled);
3554
3555 ret = -EBUSY;
3556 if (local_read(&cpu_buffer_a->committing))
3557 goto out_dec;
3558 if (local_read(&cpu_buffer_b->committing))
3559 goto out_dec;
3560
3561 buffer_a->buffers[cpu] = cpu_buffer_b;
3562 buffer_b->buffers[cpu] = cpu_buffer_a;
3563
3564 cpu_buffer_b->buffer = buffer_a;
3565 cpu_buffer_a->buffer = buffer_b;
3566
3567 ret = 0;
3568
3569out_dec:
3570 atomic_dec(&cpu_buffer_a->record_disabled);
3571 atomic_dec(&cpu_buffer_b->record_disabled);
3572out:
3573 return ret;
3574}
3575EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3576#endif
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
3594{
3595 struct buffer_data_page *bpage;
3596 unsigned long addr;
3597
3598 addr = __get_free_page(GFP_KERNEL);
3599 if (!addr)
3600 return NULL;
3601
3602 bpage = (void *)addr;
3603
3604 rb_init_page(bpage);
3605
3606 return bpage;
3607}
3608EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
3609
3610
3611
3612
3613
3614
3615
3616
3617void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3618{
3619 free_page((unsigned long)data);
3620}
3621EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656int ring_buffer_read_page(struct ring_buffer *buffer,
3657 void **data_page, size_t len, int cpu, int full)
3658{
3659 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3660 struct ring_buffer_event *event;
3661 struct buffer_data_page *bpage;
3662 struct buffer_page *reader;
3663 unsigned long flags;
3664 unsigned int commit;
3665 unsigned int read;
3666 u64 save_timestamp;
3667 int ret = -1;
3668
3669 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3670 goto out;
3671
3672
3673
3674
3675
3676 if (len <= BUF_PAGE_HDR_SIZE)
3677 goto out;
3678
3679 len -= BUF_PAGE_HDR_SIZE;
3680
3681 if (!data_page)
3682 goto out;
3683
3684 bpage = *data_page;
3685 if (!bpage)
3686 goto out;
3687
3688 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3689
3690 reader = rb_get_reader_page(cpu_buffer);
3691 if (!reader)
3692 goto out_unlock;
3693
3694 event = rb_reader_event(cpu_buffer);
3695
3696 read = reader->read;
3697 commit = rb_page_commit(reader);
3698
3699
3700
3701
3702
3703
3704
3705
3706 if (read || (len < (commit - read)) ||
3707 cpu_buffer->reader_page == cpu_buffer->commit_page) {
3708 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3709 unsigned int rpos = read;
3710 unsigned int pos = 0;
3711 unsigned int size;
3712
3713 if (full)
3714 goto out_unlock;
3715
3716 if (len > (commit - read))
3717 len = (commit - read);
3718
3719 size = rb_event_length(event);
3720
3721 if (len < size)
3722 goto out_unlock;
3723
3724
3725 save_timestamp = cpu_buffer->read_stamp;
3726
3727
3728 do {
3729 memcpy(bpage->data + pos, rpage->data + rpos, size);
3730
3731 len -= size;
3732
3733 rb_advance_reader(cpu_buffer);
3734 rpos = reader->read;
3735 pos += size;
3736
3737 event = rb_reader_event(cpu_buffer);
3738 size = rb_event_length(event);
3739 } while (len > size);
3740
3741
3742 local_set(&bpage->commit, pos);
3743 bpage->time_stamp = save_timestamp;
3744
3745
3746 read = 0;
3747 } else {
3748
3749 cpu_buffer->read += rb_page_entries(reader);
3750
3751
3752 rb_init_page(bpage);
3753 bpage = reader->page;
3754 reader->page = *data_page;
3755 local_set(&reader->write, 0);
3756 local_set(&reader->entries, 0);
3757 reader->read = 0;
3758 *data_page = bpage;
3759 }
3760 ret = read;
3761
3762 out_unlock:
3763 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3764
3765 out:
3766 return ret;
3767}
3768EXPORT_SYMBOL_GPL(ring_buffer_read_page);
3769
3770#ifdef CONFIG_TRACING
3771static ssize_t
3772rb_simple_read(struct file *filp, char __user *ubuf,
3773 size_t cnt, loff_t *ppos)
3774{
3775 unsigned long *p = filp->private_data;
3776 char buf[64];
3777 int r;
3778
3779 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3780 r = sprintf(buf, "permanently disabled\n");
3781 else
3782 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3783
3784 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3785}
3786
3787static ssize_t
3788rb_simple_write(struct file *filp, const char __user *ubuf,
3789 size_t cnt, loff_t *ppos)
3790{
3791 unsigned long *p = filp->private_data;
3792 char buf[64];
3793 unsigned long val;
3794 int ret;
3795
3796 if (cnt >= sizeof(buf))
3797 return -EINVAL;
3798
3799 if (copy_from_user(&buf, ubuf, cnt))
3800 return -EFAULT;
3801
3802 buf[cnt] = 0;
3803
3804 ret = strict_strtoul(buf, 10, &val);
3805 if (ret < 0)
3806 return ret;
3807
3808 if (val)
3809 set_bit(RB_BUFFERS_ON_BIT, p);
3810 else
3811 clear_bit(RB_BUFFERS_ON_BIT, p);
3812
3813 (*ppos)++;
3814
3815 return cnt;
3816}
3817
3818static const struct file_operations rb_simple_fops = {
3819 .open = tracing_open_generic,
3820 .read = rb_simple_read,
3821 .write = rb_simple_write,
3822};
3823
3824
3825static __init int rb_init_debugfs(void)
3826{
3827 struct dentry *d_tracer;
3828
3829 d_tracer = tracing_init_dentry();
3830
3831 trace_create_file("tracing_on", 0644, d_tracer,
3832 &ring_buffer_flags, &rb_simple_fops);
3833
3834 return 0;
3835}
3836
3837fs_initcall(rb_init_debugfs);
3838#endif
3839
3840#ifdef CONFIG_HOTPLUG_CPU
3841static int rb_cpu_notify(struct notifier_block *self,
3842 unsigned long action, void *hcpu)
3843{
3844 struct ring_buffer *buffer =
3845 container_of(self, struct ring_buffer, cpu_notify);
3846 long cpu = (long)hcpu;
3847
3848 switch (action) {
3849 case CPU_UP_PREPARE:
3850 case CPU_UP_PREPARE_FROZEN:
3851 if (cpumask_test_cpu(cpu, buffer->cpumask))
3852 return NOTIFY_OK;
3853
3854 buffer->buffers[cpu] =
3855 rb_allocate_cpu_buffer(buffer, cpu);
3856 if (!buffer->buffers[cpu]) {
3857 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3858 cpu);
3859 return NOTIFY_OK;
3860 }
3861 smp_wmb();
3862 cpumask_set_cpu(cpu, buffer->cpumask);
3863 break;
3864 case CPU_DOWN_PREPARE:
3865 case CPU_DOWN_PREPARE_FROZEN:
3866
3867
3868
3869
3870
3871 break;
3872 default:
3873 break;
3874 }
3875 return NOTIFY_OK;
3876}
3877#endif
3878