1
2
3
4
5
6
7#include <linux/trace_events.h>
8#include <linux/ring_buffer.h>
9#include <linux/trace_clock.h>
10#include <linux/sched/clock.h>
11#include <linux/trace_seq.h>
12#include <linux/spinlock.h>
13#include <linux/irq_work.h>
14#include <linux/security.h>
15#include <linux/uaccess.h>
16#include <linux/hardirq.h>
17#include <linux/kthread.h>
18#include <linux/module.h>
19#include <linux/percpu.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/hash.h>
25#include <linux/list.h>
26#include <linux/cpu.h>
27#include <linux/oom.h>
28
29#include <asm/local.h>
30
31static void update_pages_handler(struct work_struct *work);
32
33
34
35
36int ring_buffer_print_entry_header(struct trace_seq *s)
37{
38 trace_seq_puts(s, "# compressed entry header\n");
39 trace_seq_puts(s, "\ttype_len : 5 bits\n");
40 trace_seq_puts(s, "\ttime_delta : 27 bits\n");
41 trace_seq_puts(s, "\tarray : 32 bits\n");
42 trace_seq_putc(s, '\n');
43 trace_seq_printf(s, "\tpadding : type == %d\n",
44 RINGBUF_TYPE_PADDING);
45 trace_seq_printf(s, "\ttime_extend : type == %d\n",
46 RINGBUF_TYPE_TIME_EXTEND);
47 trace_seq_printf(s, "\ttime_stamp : type == %d\n",
48 RINGBUF_TYPE_TIME_STAMP);
49 trace_seq_printf(s, "\tdata max type_len == %d\n",
50 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
51
52 return !trace_seq_has_overflowed(s);
53}
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124#define RB_BUFFER_OFF (1 << 20)
125
126#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
127
128#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
129#define RB_ALIGNMENT 4U
130#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
131#define RB_EVNT_MIN_SIZE 8U
132#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT)
133
134
135#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
136
137enum {
138 RB_LEN_TIME_EXTEND = 8,
139 RB_LEN_TIME_STAMP = 8,
140};
141
142#define skip_time_extend(event) \
143 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
144
145#define extended_time(event) \
146 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
147
148static inline int rb_null_event(struct ring_buffer_event *event)
149{
150 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
151}
152
153static void rb_event_set_padding(struct ring_buffer_event *event)
154{
155
156 event->type_len = RINGBUF_TYPE_PADDING;
157 event->time_delta = 0;
158}
159
160static unsigned
161rb_event_data_length(struct ring_buffer_event *event)
162{
163 unsigned length;
164
165 if (event->type_len)
166 length = event->type_len * RB_ALIGNMENT;
167 else
168 length = event->array[0];
169 return length + RB_EVNT_HDR_SIZE;
170}
171
172
173
174
175
176
177static inline unsigned
178rb_event_length(struct ring_buffer_event *event)
179{
180 switch (event->type_len) {
181 case RINGBUF_TYPE_PADDING:
182 if (rb_null_event(event))
183
184 return -1;
185 return event->array[0] + RB_EVNT_HDR_SIZE;
186
187 case RINGBUF_TYPE_TIME_EXTEND:
188 return RB_LEN_TIME_EXTEND;
189
190 case RINGBUF_TYPE_TIME_STAMP:
191 return RB_LEN_TIME_STAMP;
192
193 case RINGBUF_TYPE_DATA:
194 return rb_event_data_length(event);
195 default:
196 WARN_ON_ONCE(1);
197 }
198
199 return 0;
200}
201
202
203
204
205
206static inline unsigned
207rb_event_ts_length(struct ring_buffer_event *event)
208{
209 unsigned len = 0;
210
211 if (extended_time(event)) {
212
213 len = RB_LEN_TIME_EXTEND;
214 event = skip_time_extend(event);
215 }
216 return len + rb_event_length(event);
217}
218
219
220
221
222
223
224
225
226
227
228
229unsigned ring_buffer_event_length(struct ring_buffer_event *event)
230{
231 unsigned length;
232
233 if (extended_time(event))
234 event = skip_time_extend(event);
235
236 length = rb_event_length(event);
237 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
238 return length;
239 length -= RB_EVNT_HDR_SIZE;
240 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
241 length -= sizeof(event->array[0]);
242 return length;
243}
244EXPORT_SYMBOL_GPL(ring_buffer_event_length);
245
246
247static __always_inline void *
248rb_event_data(struct ring_buffer_event *event)
249{
250 if (extended_time(event))
251 event = skip_time_extend(event);
252 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
253
254 if (event->type_len)
255 return (void *)&event->array[0];
256
257 return (void *)&event->array[1];
258}
259
260
261
262
263
264void *ring_buffer_event_data(struct ring_buffer_event *event)
265{
266 return rb_event_data(event);
267}
268EXPORT_SYMBOL_GPL(ring_buffer_event_data);
269
270#define for_each_buffer_cpu(buffer, cpu) \
271 for_each_cpu(cpu, buffer->cpumask)
272
273#define for_each_online_buffer_cpu(buffer, cpu) \
274 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
275
276#define TS_SHIFT 27
277#define TS_MASK ((1ULL << TS_SHIFT) - 1)
278#define TS_DELTA_TEST (~TS_MASK)
279
280
281
282
283
284
285
286
287
288
289
290u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
291{
292 u64 ts;
293
294 ts = event->array[0];
295 ts <<= TS_SHIFT;
296 ts += event->time_delta;
297
298 return ts;
299}
300
301
302#define RB_MISSED_EVENTS (1 << 31)
303
304#define RB_MISSED_STORED (1 << 30)
305
306struct buffer_data_page {
307 u64 time_stamp;
308 local_t commit;
309 unsigned char data[] RB_ALIGN_DATA;
310};
311
312
313
314
315
316
317
318
319
320struct buffer_page {
321 struct list_head list;
322 local_t write;
323 unsigned read;
324 local_t entries;
325 unsigned long real_end;
326 struct buffer_data_page *page;
327};
328
329
330
331
332
333
334
335
336
337
338
339
340
341#define RB_WRITE_MASK 0xfffff
342#define RB_WRITE_INTCNT (1 << 20)
343
344static void rb_init_page(struct buffer_data_page *bpage)
345{
346 local_set(&bpage->commit, 0);
347}
348
349
350
351
352
353static void free_buffer_page(struct buffer_page *bpage)
354{
355 free_page((unsigned long)bpage->page);
356 kfree(bpage);
357}
358
359
360
361
362static inline int test_time_stamp(u64 delta)
363{
364 if (delta & TS_DELTA_TEST)
365 return 1;
366 return 0;
367}
368
369#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
370
371
372#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
373
374int ring_buffer_print_page_header(struct trace_seq *s)
375{
376 struct buffer_data_page field;
377
378 trace_seq_printf(s, "\tfield: u64 timestamp;\t"
379 "offset:0;\tsize:%u;\tsigned:%u;\n",
380 (unsigned int)sizeof(field.time_stamp),
381 (unsigned int)is_signed_type(u64));
382
383 trace_seq_printf(s, "\tfield: local_t commit;\t"
384 "offset:%u;\tsize:%u;\tsigned:%u;\n",
385 (unsigned int)offsetof(typeof(field), commit),
386 (unsigned int)sizeof(field.commit),
387 (unsigned int)is_signed_type(long));
388
389 trace_seq_printf(s, "\tfield: int overwrite;\t"
390 "offset:%u;\tsize:%u;\tsigned:%u;\n",
391 (unsigned int)offsetof(typeof(field), commit),
392 1,
393 (unsigned int)is_signed_type(long));
394
395 trace_seq_printf(s, "\tfield: char data;\t"
396 "offset:%u;\tsize:%u;\tsigned:%u;\n",
397 (unsigned int)offsetof(typeof(field), data),
398 (unsigned int)BUF_PAGE_SIZE,
399 (unsigned int)is_signed_type(char));
400
401 return !trace_seq_has_overflowed(s);
402}
403
404struct rb_irq_work {
405 struct irq_work work;
406 wait_queue_head_t waiters;
407 wait_queue_head_t full_waiters;
408 bool waiters_pending;
409 bool full_waiters_pending;
410 bool wakeup_full;
411};
412
413
414
415
416struct rb_event_info {
417 u64 ts;
418 u64 delta;
419 u64 before;
420 u64 after;
421 unsigned long length;
422 struct buffer_page *tail_page;
423 int add_timestamp;
424};
425
426
427
428
429
430
431
432
433enum {
434 RB_ADD_STAMP_NONE = 0,
435 RB_ADD_STAMP_EXTEND = BIT(1),
436 RB_ADD_STAMP_ABSOLUTE = BIT(2),
437 RB_ADD_STAMP_FORCE = BIT(3)
438};
439
440
441
442
443
444
445
446
447
448enum {
449 RB_CTX_NMI,
450 RB_CTX_IRQ,
451 RB_CTX_SOFTIRQ,
452 RB_CTX_NORMAL,
453 RB_CTX_MAX
454};
455
456#if BITS_PER_LONG == 32
457#define RB_TIME_32
458#endif
459
460
461
462
463#ifdef RB_TIME_32
464
465struct rb_time_struct {
466 local_t cnt;
467 local_t top;
468 local_t bottom;
469};
470#else
471#include <asm/local64.h>
472struct rb_time_struct {
473 local64_t time;
474};
475#endif
476typedef struct rb_time_struct rb_time_t;
477
478
479
480
481struct ring_buffer_per_cpu {
482 int cpu;
483 atomic_t record_disabled;
484 atomic_t resize_disabled;
485 struct trace_buffer *buffer;
486 raw_spinlock_t reader_lock;
487 arch_spinlock_t lock;
488 struct lock_class_key lock_key;
489 struct buffer_data_page *free_page;
490 unsigned long nr_pages;
491 unsigned int current_context;
492 struct list_head *pages;
493 struct buffer_page *head_page;
494 struct buffer_page *tail_page;
495 struct buffer_page *commit_page;
496 struct buffer_page *reader_page;
497 unsigned long lost_events;
498 unsigned long last_overrun;
499 unsigned long nest;
500 local_t entries_bytes;
501 local_t entries;
502 local_t overrun;
503 local_t commit_overrun;
504 local_t dropped_events;
505 local_t committing;
506 local_t commits;
507 local_t pages_touched;
508 local_t pages_read;
509 long last_pages_touch;
510 size_t shortest_full;
511 unsigned long read;
512 unsigned long read_bytes;
513 rb_time_t write_stamp;
514 rb_time_t before_stamp;
515 u64 read_stamp;
516
517 long nr_pages_to_update;
518 struct list_head new_pages;
519 struct work_struct update_pages_work;
520 struct completion update_done;
521
522 struct rb_irq_work irq_work;
523};
524
525struct trace_buffer {
526 unsigned flags;
527 int cpus;
528 atomic_t record_disabled;
529 cpumask_var_t cpumask;
530
531 struct lock_class_key *reader_lock_key;
532
533 struct mutex mutex;
534
535 struct ring_buffer_per_cpu **buffers;
536
537 struct hlist_node node;
538 u64 (*clock)(void);
539
540 struct rb_irq_work irq_work;
541 bool time_stamp_abs;
542};
543
544struct ring_buffer_iter {
545 struct ring_buffer_per_cpu *cpu_buffer;
546 unsigned long head;
547 unsigned long next_event;
548 struct buffer_page *head_page;
549 struct buffer_page *cache_reader_page;
550 unsigned long cache_read;
551 u64 read_stamp;
552 u64 page_stamp;
553 struct ring_buffer_event *event;
554 int missed_events;
555};
556
557#ifdef RB_TIME_32
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590#define RB_TIME_SHIFT 30
591#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
592
593static inline int rb_time_cnt(unsigned long val)
594{
595 return (val >> RB_TIME_SHIFT) & 3;
596}
597
598static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
599{
600 u64 val;
601
602 val = top & RB_TIME_VAL_MASK;
603 val <<= RB_TIME_SHIFT;
604 val |= bottom & RB_TIME_VAL_MASK;
605
606 return val;
607}
608
609static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
610{
611 unsigned long top, bottom;
612 unsigned long c;
613
614
615
616
617
618
619 do {
620 c = local_read(&t->cnt);
621 top = local_read(&t->top);
622 bottom = local_read(&t->bottom);
623 } while (c != local_read(&t->cnt));
624
625 *cnt = rb_time_cnt(top);
626
627
628 if (*cnt != rb_time_cnt(bottom))
629 return false;
630
631 *ret = rb_time_val(top, bottom);
632 return true;
633}
634
635static bool rb_time_read(rb_time_t *t, u64 *ret)
636{
637 unsigned long cnt;
638
639 return __rb_time_read(t, ret, &cnt);
640}
641
642static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
643{
644 return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
645}
646
647static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
648{
649 *top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
650 *bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
651}
652
653static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
654{
655 val = rb_time_val_cnt(val, cnt);
656 local_set(t, val);
657}
658
659static void rb_time_set(rb_time_t *t, u64 val)
660{
661 unsigned long cnt, top, bottom;
662
663 rb_time_split(val, &top, &bottom);
664
665
666 do {
667 cnt = local_inc_return(&t->cnt);
668 rb_time_val_set(&t->top, top, cnt);
669 rb_time_val_set(&t->bottom, bottom, cnt);
670 } while (cnt != local_read(&t->cnt));
671}
672
673static inline bool
674rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
675{
676 unsigned long ret;
677
678 ret = local_cmpxchg(l, expect, set);
679 return ret == expect;
680}
681
682static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
683{
684 unsigned long cnt, top, bottom;
685 unsigned long cnt2, top2, bottom2;
686 u64 val;
687
688
689 if (!__rb_time_read(t, &val, &cnt2))
690 return false;
691
692 if (val != expect)
693 return false;
694
695 cnt = local_read(&t->cnt);
696 if ((cnt & 3) != cnt2)
697 return false;
698
699 cnt2 = cnt + 1;
700
701 rb_time_split(val, &top, &bottom);
702 top = rb_time_val_cnt(top, cnt);
703 bottom = rb_time_val_cnt(bottom, cnt);
704
705 rb_time_split(set, &top2, &bottom2);
706 top2 = rb_time_val_cnt(top2, cnt2);
707 bottom2 = rb_time_val_cnt(bottom2, cnt2);
708
709 if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
710 return false;
711 if (!rb_time_read_cmpxchg(&t->top, top, top2))
712 return false;
713 if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
714 return false;
715 return true;
716}
717
718#else
719
720
721
722static inline bool rb_time_read(rb_time_t *t, u64 *ret)
723{
724 *ret = local64_read(&t->time);
725 return true;
726}
727static void rb_time_set(rb_time_t *t, u64 val)
728{
729 local64_set(&t->time, val);
730}
731
732static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
733{
734 u64 val;
735 val = local64_cmpxchg(&t->time, expect, set);
736 return val == expect;
737}
738#endif
739
740
741
742
743
744
745
746
747size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
748{
749 return buffer->buffers[cpu]->nr_pages;
750}
751
752
753
754
755
756
757
758
759size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
760{
761 size_t read;
762 size_t cnt;
763
764 read = local_read(&buffer->buffers[cpu]->pages_read);
765 cnt = local_read(&buffer->buffers[cpu]->pages_touched);
766
767 if (cnt < read) {
768 WARN_ON_ONCE(read > cnt + 1);
769 return 0;
770 }
771
772 return cnt - read;
773}
774
775
776
777
778
779
780
781static void rb_wake_up_waiters(struct irq_work *work)
782{
783 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
784
785 wake_up_all(&rbwork->waiters);
786 if (rbwork->wakeup_full) {
787 rbwork->wakeup_full = false;
788 wake_up_all(&rbwork->full_waiters);
789 }
790}
791
792
793
794
795
796
797
798
799
800
801
802int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
803{
804 struct ring_buffer_per_cpu *cpu_buffer;
805 DEFINE_WAIT(wait);
806 struct rb_irq_work *work;
807 int ret = 0;
808
809
810
811
812
813
814 if (cpu == RING_BUFFER_ALL_CPUS) {
815 work = &buffer->irq_work;
816
817 full = 0;
818 } else {
819 if (!cpumask_test_cpu(cpu, buffer->cpumask))
820 return -ENODEV;
821 cpu_buffer = buffer->buffers[cpu];
822 work = &cpu_buffer->irq_work;
823 }
824
825
826 while (true) {
827 if (full)
828 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
829 else
830 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852 if (full)
853 work->full_waiters_pending = true;
854 else
855 work->waiters_pending = true;
856
857 if (signal_pending(current)) {
858 ret = -EINTR;
859 break;
860 }
861
862 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
863 break;
864
865 if (cpu != RING_BUFFER_ALL_CPUS &&
866 !ring_buffer_empty_cpu(buffer, cpu)) {
867 unsigned long flags;
868 bool pagebusy;
869 size_t nr_pages;
870 size_t dirty;
871
872 if (!full)
873 break;
874
875 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
876 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
877 nr_pages = cpu_buffer->nr_pages;
878 dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
879 if (!cpu_buffer->shortest_full ||
880 cpu_buffer->shortest_full < full)
881 cpu_buffer->shortest_full = full;
882 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
883 if (!pagebusy &&
884 (!nr_pages || (dirty * 100) > full * nr_pages))
885 break;
886 }
887
888 schedule();
889 }
890
891 if (full)
892 finish_wait(&work->full_waiters, &wait);
893 else
894 finish_wait(&work->waiters, &wait);
895
896 return ret;
897}
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
914 struct file *filp, poll_table *poll_table)
915{
916 struct ring_buffer_per_cpu *cpu_buffer;
917 struct rb_irq_work *work;
918
919 if (cpu == RING_BUFFER_ALL_CPUS)
920 work = &buffer->irq_work;
921 else {
922 if (!cpumask_test_cpu(cpu, buffer->cpumask))
923 return -EINVAL;
924
925 cpu_buffer = buffer->buffers[cpu];
926 work = &cpu_buffer->irq_work;
927 }
928
929 poll_wait(filp, &work->waiters, poll_table);
930 work->waiters_pending = true;
931
932
933
934
935
936
937
938
939
940
941
942
943
944 smp_mb();
945
946 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
947 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
948 return EPOLLIN | EPOLLRDNORM;
949 return 0;
950}
951
952
953#define RB_WARN_ON(b, cond) \
954 ({ \
955 int _____ret = unlikely(cond); \
956 if (_____ret) { \
957 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
958 struct ring_buffer_per_cpu *__b = \
959 (void *)b; \
960 atomic_inc(&__b->buffer->record_disabled); \
961 } else \
962 atomic_inc(&b->record_disabled); \
963 WARN_ON(1); \
964 } \
965 _____ret; \
966 })
967
968
969#define DEBUG_SHIFT 0
970
971static inline u64 rb_time_stamp(struct trace_buffer *buffer)
972{
973 u64 ts;
974
975
976 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
977 ts = trace_clock_local();
978 else
979 ts = buffer->clock();
980
981
982 return ts << DEBUG_SHIFT;
983}
984
985u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
986{
987 u64 time;
988
989 preempt_disable_notrace();
990 time = rb_time_stamp(buffer);
991 preempt_enable_notrace();
992
993 return time;
994}
995EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
996
997void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
998 int cpu, u64 *ts)
999{
1000
1001 *ts >>= DEBUG_SHIFT;
1002}
1003EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074#define RB_PAGE_NORMAL 0UL
1075#define RB_PAGE_HEAD 1UL
1076#define RB_PAGE_UPDATE 2UL
1077
1078
1079#define RB_FLAG_MASK 3UL
1080
1081
1082#define RB_PAGE_MOVED 4UL
1083
1084
1085
1086
1087static struct list_head *rb_list_head(struct list_head *list)
1088{
1089 unsigned long val = (unsigned long)list;
1090
1091 return (struct list_head *)(val & ~RB_FLAG_MASK);
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102static inline int
1103rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1104 struct buffer_page *page, struct list_head *list)
1105{
1106 unsigned long val;
1107
1108 val = (unsigned long)list->next;
1109
1110 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1111 return RB_PAGE_MOVED;
1112
1113 return val & RB_FLAG_MASK;
1114}
1115
1116
1117
1118
1119
1120
1121
1122
1123static bool rb_is_reader_page(struct buffer_page *page)
1124{
1125 struct list_head *list = page->list.prev;
1126
1127 return rb_list_head(list->next) != &page->list;
1128}
1129
1130
1131
1132
1133static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
1134 struct list_head *list)
1135{
1136 unsigned long *ptr;
1137
1138 ptr = (unsigned long *)&list->next;
1139 *ptr |= RB_PAGE_HEAD;
1140 *ptr &= ~RB_PAGE_UPDATE;
1141}
1142
1143
1144
1145
1146static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1147{
1148 struct buffer_page *head;
1149
1150 head = cpu_buffer->head_page;
1151 if (!head)
1152 return;
1153
1154
1155
1156
1157 rb_set_list_to_head(cpu_buffer, head->list.prev);
1158}
1159
1160static void rb_list_head_clear(struct list_head *list)
1161{
1162 unsigned long *ptr = (unsigned long *)&list->next;
1163
1164 *ptr &= ~RB_FLAG_MASK;
1165}
1166
1167
1168
1169
1170static void
1171rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1172{
1173 struct list_head *hd;
1174
1175
1176 rb_list_head_clear(cpu_buffer->pages);
1177
1178 list_for_each(hd, cpu_buffer->pages)
1179 rb_list_head_clear(hd);
1180}
1181
1182static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1183 struct buffer_page *head,
1184 struct buffer_page *prev,
1185 int old_flag, int new_flag)
1186{
1187 struct list_head *list;
1188 unsigned long val = (unsigned long)&head->list;
1189 unsigned long ret;
1190
1191 list = &prev->list;
1192
1193 val &= ~RB_FLAG_MASK;
1194
1195 ret = cmpxchg((unsigned long *)&list->next,
1196 val | old_flag, val | new_flag);
1197
1198
1199 if ((ret & ~RB_FLAG_MASK) != val)
1200 return RB_PAGE_MOVED;
1201
1202 return ret & RB_FLAG_MASK;
1203}
1204
1205static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1206 struct buffer_page *head,
1207 struct buffer_page *prev,
1208 int old_flag)
1209{
1210 return rb_head_page_set(cpu_buffer, head, prev,
1211 old_flag, RB_PAGE_UPDATE);
1212}
1213
1214static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1215 struct buffer_page *head,
1216 struct buffer_page *prev,
1217 int old_flag)
1218{
1219 return rb_head_page_set(cpu_buffer, head, prev,
1220 old_flag, RB_PAGE_HEAD);
1221}
1222
1223static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1224 struct buffer_page *head,
1225 struct buffer_page *prev,
1226 int old_flag)
1227{
1228 return rb_head_page_set(cpu_buffer, head, prev,
1229 old_flag, RB_PAGE_NORMAL);
1230}
1231
1232static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1233 struct buffer_page **bpage)
1234{
1235 struct list_head *p = rb_list_head((*bpage)->list.next);
1236
1237 *bpage = list_entry(p, struct buffer_page, list);
1238}
1239
1240static struct buffer_page *
1241rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1242{
1243 struct buffer_page *head;
1244 struct buffer_page *page;
1245 struct list_head *list;
1246 int i;
1247
1248 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1249 return NULL;
1250
1251
1252 list = cpu_buffer->pages;
1253 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1254 return NULL;
1255
1256 page = head = cpu_buffer->head_page;
1257
1258
1259
1260
1261
1262
1263 for (i = 0; i < 3; i++) {
1264 do {
1265 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1266 cpu_buffer->head_page = page;
1267 return page;
1268 }
1269 rb_inc_page(cpu_buffer, &page);
1270 } while (page != head);
1271 }
1272
1273 RB_WARN_ON(cpu_buffer, 1);
1274
1275 return NULL;
1276}
1277
1278static int rb_head_page_replace(struct buffer_page *old,
1279 struct buffer_page *new)
1280{
1281 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1282 unsigned long val;
1283 unsigned long ret;
1284
1285 val = *ptr & ~RB_FLAG_MASK;
1286 val |= RB_PAGE_HEAD;
1287
1288 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1289
1290 return ret == val;
1291}
1292
1293
1294
1295
1296static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1297 struct buffer_page *tail_page,
1298 struct buffer_page *next_page)
1299{
1300 unsigned long old_entries;
1301 unsigned long old_write;
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1313 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1314
1315 local_inc(&cpu_buffer->pages_touched);
1316
1317
1318
1319
1320 barrier();
1321
1322
1323
1324
1325
1326
1327 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1328
1329 unsigned long val = old_write & ~RB_WRITE_MASK;
1330 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342 (void)local_cmpxchg(&next_page->write, old_write, val);
1343 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1344
1345
1346
1347
1348
1349
1350 local_set(&next_page->page->commit, 0);
1351
1352
1353 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1354 }
1355}
1356
1357static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1358 struct buffer_page *bpage)
1359{
1360 unsigned long val = (unsigned long)bpage;
1361
1362 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1363 return 1;
1364
1365 return 0;
1366}
1367
1368
1369
1370
1371static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1372 struct list_head *list)
1373{
1374 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1375 return 1;
1376 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1377 return 1;
1378 return 0;
1379}
1380
1381
1382
1383
1384
1385
1386
1387
1388static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1389{
1390 struct list_head *head = cpu_buffer->pages;
1391 struct buffer_page *bpage, *tmp;
1392
1393
1394 if (cpu_buffer->head_page)
1395 rb_set_head_page(cpu_buffer);
1396
1397 rb_head_page_deactivate(cpu_buffer);
1398
1399 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1400 return -1;
1401 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1402 return -1;
1403
1404 if (rb_check_list(cpu_buffer, head))
1405 return -1;
1406
1407 list_for_each_entry_safe(bpage, tmp, head, list) {
1408 if (RB_WARN_ON(cpu_buffer,
1409 bpage->list.next->prev != &bpage->list))
1410 return -1;
1411 if (RB_WARN_ON(cpu_buffer,
1412 bpage->list.prev->next != &bpage->list))
1413 return -1;
1414 if (rb_check_list(cpu_buffer, &bpage->list))
1415 return -1;
1416 }
1417
1418 rb_head_page_activate(cpu_buffer);
1419
1420 return 0;
1421}
1422
1423static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1424{
1425 struct buffer_page *bpage, *tmp;
1426 bool user_thread = current->mm != NULL;
1427 gfp_t mflags;
1428 long i;
1429
1430
1431
1432
1433
1434
1435
1436
1437 i = si_mem_available();
1438 if (i < nr_pages)
1439 return -ENOMEM;
1440
1441
1442
1443
1444
1445
1446 mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457 if (user_thread)
1458 set_current_oom_origin();
1459 for (i = 0; i < nr_pages; i++) {
1460 struct page *page;
1461
1462 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1463 mflags, cpu_to_node(cpu));
1464 if (!bpage)
1465 goto free_pages;
1466
1467 list_add(&bpage->list, pages);
1468
1469 page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
1470 if (!page)
1471 goto free_pages;
1472 bpage->page = page_address(page);
1473 rb_init_page(bpage->page);
1474
1475 if (user_thread && fatal_signal_pending(current))
1476 goto free_pages;
1477 }
1478 if (user_thread)
1479 clear_current_oom_origin();
1480
1481 return 0;
1482
1483free_pages:
1484 list_for_each_entry_safe(bpage, tmp, pages, list) {
1485 list_del_init(&bpage->list);
1486 free_buffer_page(bpage);
1487 }
1488 if (user_thread)
1489 clear_current_oom_origin();
1490
1491 return -ENOMEM;
1492}
1493
1494static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1495 unsigned long nr_pages)
1496{
1497 LIST_HEAD(pages);
1498
1499 WARN_ON(!nr_pages);
1500
1501 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1502 return -ENOMEM;
1503
1504
1505
1506
1507
1508
1509 cpu_buffer->pages = pages.next;
1510 list_del(&pages);
1511
1512 cpu_buffer->nr_pages = nr_pages;
1513
1514 rb_check_pages(cpu_buffer);
1515
1516 return 0;
1517}
1518
1519static struct ring_buffer_per_cpu *
1520rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1521{
1522 struct ring_buffer_per_cpu *cpu_buffer;
1523 struct buffer_page *bpage;
1524 struct page *page;
1525 int ret;
1526
1527 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1528 GFP_KERNEL, cpu_to_node(cpu));
1529 if (!cpu_buffer)
1530 return NULL;
1531
1532 cpu_buffer->cpu = cpu;
1533 cpu_buffer->buffer = buffer;
1534 raw_spin_lock_init(&cpu_buffer->reader_lock);
1535 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1536 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1537 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1538 init_completion(&cpu_buffer->update_done);
1539 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1540 init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1541 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1542
1543 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1544 GFP_KERNEL, cpu_to_node(cpu));
1545 if (!bpage)
1546 goto fail_free_buffer;
1547
1548 rb_check_bpage(cpu_buffer, bpage);
1549
1550 cpu_buffer->reader_page = bpage;
1551 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1552 if (!page)
1553 goto fail_free_reader;
1554 bpage->page = page_address(page);
1555 rb_init_page(bpage->page);
1556
1557 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1558 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1559
1560 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1561 if (ret < 0)
1562 goto fail_free_reader;
1563
1564 cpu_buffer->head_page
1565 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1566 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1567
1568 rb_head_page_activate(cpu_buffer);
1569
1570 return cpu_buffer;
1571
1572 fail_free_reader:
1573 free_buffer_page(cpu_buffer->reader_page);
1574
1575 fail_free_buffer:
1576 kfree(cpu_buffer);
1577 return NULL;
1578}
1579
1580static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1581{
1582 struct list_head *head = cpu_buffer->pages;
1583 struct buffer_page *bpage, *tmp;
1584
1585 free_buffer_page(cpu_buffer->reader_page);
1586
1587 rb_head_page_deactivate(cpu_buffer);
1588
1589 if (head) {
1590 list_for_each_entry_safe(bpage, tmp, head, list) {
1591 list_del_init(&bpage->list);
1592 free_buffer_page(bpage);
1593 }
1594 bpage = list_entry(head, struct buffer_page, list);
1595 free_buffer_page(bpage);
1596 }
1597
1598 kfree(cpu_buffer);
1599}
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1613 struct lock_class_key *key)
1614{
1615 struct trace_buffer *buffer;
1616 long nr_pages;
1617 int bsize;
1618 int cpu;
1619 int ret;
1620
1621
1622 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1623 GFP_KERNEL);
1624 if (!buffer)
1625 return NULL;
1626
1627 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1628 goto fail_free_buffer;
1629
1630 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1631 buffer->flags = flags;
1632 buffer->clock = trace_clock_local;
1633 buffer->reader_lock_key = key;
1634
1635 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1636 init_waitqueue_head(&buffer->irq_work.waiters);
1637
1638
1639 if (nr_pages < 2)
1640 nr_pages = 2;
1641
1642 buffer->cpus = nr_cpu_ids;
1643
1644 bsize = sizeof(void *) * nr_cpu_ids;
1645 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1646 GFP_KERNEL);
1647 if (!buffer->buffers)
1648 goto fail_free_cpumask;
1649
1650 cpu = raw_smp_processor_id();
1651 cpumask_set_cpu(cpu, buffer->cpumask);
1652 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1653 if (!buffer->buffers[cpu])
1654 goto fail_free_buffers;
1655
1656 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1657 if (ret < 0)
1658 goto fail_free_buffers;
1659
1660 mutex_init(&buffer->mutex);
1661
1662 return buffer;
1663
1664 fail_free_buffers:
1665 for_each_buffer_cpu(buffer, cpu) {
1666 if (buffer->buffers[cpu])
1667 rb_free_cpu_buffer(buffer->buffers[cpu]);
1668 }
1669 kfree(buffer->buffers);
1670
1671 fail_free_cpumask:
1672 free_cpumask_var(buffer->cpumask);
1673
1674 fail_free_buffer:
1675 kfree(buffer);
1676 return NULL;
1677}
1678EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1679
1680
1681
1682
1683
1684void
1685ring_buffer_free(struct trace_buffer *buffer)
1686{
1687 int cpu;
1688
1689 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1690
1691 for_each_buffer_cpu(buffer, cpu)
1692 rb_free_cpu_buffer(buffer->buffers[cpu]);
1693
1694 kfree(buffer->buffers);
1695 free_cpumask_var(buffer->cpumask);
1696
1697 kfree(buffer);
1698}
1699EXPORT_SYMBOL_GPL(ring_buffer_free);
1700
1701void ring_buffer_set_clock(struct trace_buffer *buffer,
1702 u64 (*clock)(void))
1703{
1704 buffer->clock = clock;
1705}
1706
1707void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1708{
1709 buffer->time_stamp_abs = abs;
1710}
1711
1712bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1713{
1714 return buffer->time_stamp_abs;
1715}
1716
1717static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1718
1719static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1720{
1721 return local_read(&bpage->entries) & RB_WRITE_MASK;
1722}
1723
1724static inline unsigned long rb_page_write(struct buffer_page *bpage)
1725{
1726 return local_read(&bpage->write) & RB_WRITE_MASK;
1727}
1728
1729static int
1730rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1731{
1732 struct list_head *tail_page, *to_remove, *next_page;
1733 struct buffer_page *to_remove_page, *tmp_iter_page;
1734 struct buffer_page *last_page, *first_page;
1735 unsigned long nr_removed;
1736 unsigned long head_bit;
1737 int page_entries;
1738
1739 head_bit = 0;
1740
1741 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1742 atomic_inc(&cpu_buffer->record_disabled);
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752 tail_page = &cpu_buffer->tail_page->list;
1753
1754
1755
1756
1757
1758 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1759 tail_page = rb_list_head(tail_page->next);
1760 to_remove = tail_page;
1761
1762
1763 first_page = list_entry(rb_list_head(to_remove->next),
1764 struct buffer_page, list);
1765
1766 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1767 to_remove = rb_list_head(to_remove)->next;
1768 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1769 }
1770
1771 next_page = rb_list_head(to_remove)->next;
1772
1773
1774
1775
1776
1777
1778 tail_page->next = (struct list_head *)((unsigned long)next_page |
1779 head_bit);
1780 next_page = rb_list_head(next_page);
1781 next_page->prev = tail_page;
1782
1783
1784 cpu_buffer->pages = next_page;
1785
1786
1787 if (head_bit)
1788 cpu_buffer->head_page = list_entry(next_page,
1789 struct buffer_page, list);
1790
1791
1792
1793
1794
1795 cpu_buffer->read = 0;
1796
1797
1798 atomic_dec(&cpu_buffer->record_disabled);
1799 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1800
1801 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1802
1803
1804 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1805 list);
1806 tmp_iter_page = first_page;
1807
1808 do {
1809 cond_resched();
1810
1811 to_remove_page = tmp_iter_page;
1812 rb_inc_page(cpu_buffer, &tmp_iter_page);
1813
1814
1815 page_entries = rb_page_entries(to_remove_page);
1816 if (page_entries) {
1817
1818
1819
1820
1821
1822
1823 local_add(page_entries, &cpu_buffer->overrun);
1824 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1825 }
1826
1827
1828
1829
1830
1831 free_buffer_page(to_remove_page);
1832 nr_removed--;
1833
1834 } while (to_remove_page != last_page);
1835
1836 RB_WARN_ON(cpu_buffer, nr_removed);
1837
1838 return nr_removed == 0;
1839}
1840
1841static int
1842rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1843{
1844 struct list_head *pages = &cpu_buffer->new_pages;
1845 int retries, success;
1846
1847 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862 retries = 10;
1863 success = 0;
1864 while (retries--) {
1865 struct list_head *head_page, *prev_page, *r;
1866 struct list_head *last_page, *first_page;
1867 struct list_head *head_page_with_bit;
1868
1869 head_page = &rb_set_head_page(cpu_buffer)->list;
1870 if (!head_page)
1871 break;
1872 prev_page = head_page->prev;
1873
1874 first_page = pages->next;
1875 last_page = pages->prev;
1876
1877 head_page_with_bit = (struct list_head *)
1878 ((unsigned long)head_page | RB_PAGE_HEAD);
1879
1880 last_page->next = head_page_with_bit;
1881 first_page->prev = prev_page;
1882
1883 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1884
1885 if (r == head_page_with_bit) {
1886
1887
1888
1889
1890
1891 head_page->prev = last_page;
1892 success = 1;
1893 break;
1894 }
1895 }
1896
1897 if (success)
1898 INIT_LIST_HEAD(pages);
1899
1900
1901
1902
1903 RB_WARN_ON(cpu_buffer, !success);
1904 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1905
1906
1907 if (!success) {
1908 struct buffer_page *bpage, *tmp;
1909 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1910 list) {
1911 list_del_init(&bpage->list);
1912 free_buffer_page(bpage);
1913 }
1914 }
1915 return success;
1916}
1917
1918static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1919{
1920 int success;
1921
1922 if (cpu_buffer->nr_pages_to_update > 0)
1923 success = rb_insert_pages(cpu_buffer);
1924 else
1925 success = rb_remove_pages(cpu_buffer,
1926 -cpu_buffer->nr_pages_to_update);
1927
1928 if (success)
1929 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1930}
1931
1932static void update_pages_handler(struct work_struct *work)
1933{
1934 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1935 struct ring_buffer_per_cpu, update_pages_work);
1936 rb_update_pages(cpu_buffer);
1937 complete(&cpu_buffer->update_done);
1938}
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
1951 int cpu_id)
1952{
1953 struct ring_buffer_per_cpu *cpu_buffer;
1954 unsigned long nr_pages;
1955 int cpu, err = 0;
1956
1957
1958
1959
1960 if (!buffer)
1961 return size;
1962
1963
1964 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1965 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1966 return size;
1967
1968 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1969
1970
1971 if (nr_pages < 2)
1972 nr_pages = 2;
1973
1974 size = nr_pages * BUF_PAGE_SIZE;
1975
1976
1977 mutex_lock(&buffer->mutex);
1978
1979
1980 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1981
1982
1983
1984
1985
1986 for_each_buffer_cpu(buffer, cpu) {
1987 cpu_buffer = buffer->buffers[cpu];
1988 if (atomic_read(&cpu_buffer->resize_disabled)) {
1989 err = -EBUSY;
1990 goto out_err_unlock;
1991 }
1992 }
1993
1994
1995 for_each_buffer_cpu(buffer, cpu) {
1996 cpu_buffer = buffer->buffers[cpu];
1997
1998 cpu_buffer->nr_pages_to_update = nr_pages -
1999 cpu_buffer->nr_pages;
2000
2001
2002
2003 if (cpu_buffer->nr_pages_to_update <= 0)
2004 continue;
2005
2006
2007
2008
2009 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2010 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
2011 &cpu_buffer->new_pages, cpu)) {
2012
2013 err = -ENOMEM;
2014 goto out_err;
2015 }
2016 }
2017
2018 get_online_cpus();
2019
2020
2021
2022
2023
2024 for_each_buffer_cpu(buffer, cpu) {
2025 cpu_buffer = buffer->buffers[cpu];
2026 if (!cpu_buffer->nr_pages_to_update)
2027 continue;
2028
2029
2030 if (!cpu_online(cpu)) {
2031 rb_update_pages(cpu_buffer);
2032 cpu_buffer->nr_pages_to_update = 0;
2033 } else {
2034 schedule_work_on(cpu,
2035 &cpu_buffer->update_pages_work);
2036 }
2037 }
2038
2039
2040 for_each_buffer_cpu(buffer, cpu) {
2041 cpu_buffer = buffer->buffers[cpu];
2042 if (!cpu_buffer->nr_pages_to_update)
2043 continue;
2044
2045 if (cpu_online(cpu))
2046 wait_for_completion(&cpu_buffer->update_done);
2047 cpu_buffer->nr_pages_to_update = 0;
2048 }
2049
2050 put_online_cpus();
2051 } else {
2052
2053 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
2054 goto out;
2055
2056 cpu_buffer = buffer->buffers[cpu_id];
2057
2058 if (nr_pages == cpu_buffer->nr_pages)
2059 goto out;
2060
2061
2062
2063
2064
2065
2066 if (atomic_read(&cpu_buffer->resize_disabled)) {
2067 err = -EBUSY;
2068 goto out_err_unlock;
2069 }
2070
2071 cpu_buffer->nr_pages_to_update = nr_pages -
2072 cpu_buffer->nr_pages;
2073
2074 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2075 if (cpu_buffer->nr_pages_to_update > 0 &&
2076 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
2077 &cpu_buffer->new_pages, cpu_id)) {
2078 err = -ENOMEM;
2079 goto out_err;
2080 }
2081
2082 get_online_cpus();
2083
2084
2085 if (!cpu_online(cpu_id))
2086 rb_update_pages(cpu_buffer);
2087 else {
2088 schedule_work_on(cpu_id,
2089 &cpu_buffer->update_pages_work);
2090 wait_for_completion(&cpu_buffer->update_done);
2091 }
2092
2093 cpu_buffer->nr_pages_to_update = 0;
2094 put_online_cpus();
2095 }
2096
2097 out:
2098
2099
2100
2101
2102
2103
2104
2105 if (atomic_read(&buffer->record_disabled)) {
2106 atomic_inc(&buffer->record_disabled);
2107
2108
2109
2110
2111
2112
2113 synchronize_rcu();
2114 for_each_buffer_cpu(buffer, cpu) {
2115 cpu_buffer = buffer->buffers[cpu];
2116 rb_check_pages(cpu_buffer);
2117 }
2118 atomic_dec(&buffer->record_disabled);
2119 }
2120
2121 mutex_unlock(&buffer->mutex);
2122 return size;
2123
2124 out_err:
2125 for_each_buffer_cpu(buffer, cpu) {
2126 struct buffer_page *bpage, *tmp;
2127
2128 cpu_buffer = buffer->buffers[cpu];
2129 cpu_buffer->nr_pages_to_update = 0;
2130
2131 if (list_empty(&cpu_buffer->new_pages))
2132 continue;
2133
2134 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2135 list) {
2136 list_del_init(&bpage->list);
2137 free_buffer_page(bpage);
2138 }
2139 }
2140 out_err_unlock:
2141 mutex_unlock(&buffer->mutex);
2142 return err;
2143}
2144EXPORT_SYMBOL_GPL(ring_buffer_resize);
2145
2146void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2147{
2148 mutex_lock(&buffer->mutex);
2149 if (val)
2150 buffer->flags |= RB_FL_OVERWRITE;
2151 else
2152 buffer->flags &= ~RB_FL_OVERWRITE;
2153 mutex_unlock(&buffer->mutex);
2154}
2155EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2156
2157static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2158{
2159 return bpage->page->data + index;
2160}
2161
2162static __always_inline struct ring_buffer_event *
2163rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2164{
2165 return __rb_page_index(cpu_buffer->reader_page,
2166 cpu_buffer->reader_page->read);
2167}
2168
2169static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2170{
2171 return local_read(&bpage->page->commit);
2172}
2173
2174static struct ring_buffer_event *
2175rb_iter_head_event(struct ring_buffer_iter *iter)
2176{
2177 struct ring_buffer_event *event;
2178 struct buffer_page *iter_head_page = iter->head_page;
2179 unsigned long commit;
2180 unsigned length;
2181
2182 if (iter->head != iter->next_event)
2183 return iter->event;
2184
2185
2186
2187
2188
2189
2190 commit = rb_page_commit(iter_head_page);
2191 smp_rmb();
2192 event = __rb_page_index(iter_head_page, iter->head);
2193 length = rb_event_length(event);
2194
2195
2196
2197
2198
2199 barrier();
2200
2201 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2202
2203 goto reset;
2204
2205 memcpy(iter->event, event, length);
2206
2207
2208
2209
2210 smp_rmb();
2211
2212
2213 if (iter->page_stamp != iter_head_page->page->time_stamp ||
2214 commit > rb_page_commit(iter_head_page))
2215 goto reset;
2216
2217 iter->next_event = iter->head + length;
2218 return iter->event;
2219 reset:
2220
2221 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2222 iter->head = 0;
2223 iter->next_event = 0;
2224 iter->missed_events = 1;
2225 return NULL;
2226}
2227
2228
2229static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2230{
2231 return rb_page_commit(bpage);
2232}
2233
2234static __always_inline unsigned
2235rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2236{
2237 return rb_page_commit(cpu_buffer->commit_page);
2238}
2239
2240static __always_inline unsigned
2241rb_event_index(struct ring_buffer_event *event)
2242{
2243 unsigned long addr = (unsigned long)event;
2244
2245 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2246}
2247
2248static void rb_inc_iter(struct ring_buffer_iter *iter)
2249{
2250 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2251
2252
2253
2254
2255
2256
2257
2258 if (iter->head_page == cpu_buffer->reader_page)
2259 iter->head_page = rb_set_head_page(cpu_buffer);
2260 else
2261 rb_inc_page(cpu_buffer, &iter->head_page);
2262
2263 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2264 iter->head = 0;
2265 iter->next_event = 0;
2266}
2267
2268
2269
2270
2271
2272
2273
2274
2275static int
2276rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2277 struct buffer_page *tail_page,
2278 struct buffer_page *next_page)
2279{
2280 struct buffer_page *new_head;
2281 int entries;
2282 int type;
2283 int ret;
2284
2285 entries = rb_page_entries(next_page);
2286
2287
2288
2289
2290
2291
2292 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2293 RB_PAGE_HEAD);
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306 switch (type) {
2307 case RB_PAGE_HEAD:
2308
2309
2310
2311
2312
2313 local_add(entries, &cpu_buffer->overrun);
2314 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2315
2316
2317
2318
2319
2320
2321
2322 break;
2323
2324 case RB_PAGE_UPDATE:
2325
2326
2327
2328
2329 break;
2330 case RB_PAGE_NORMAL:
2331
2332
2333
2334
2335
2336 return 1;
2337 case RB_PAGE_MOVED:
2338
2339
2340
2341
2342
2343 return 1;
2344 default:
2345 RB_WARN_ON(cpu_buffer, 1);
2346 return -1;
2347 }
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363 new_head = next_page;
2364 rb_inc_page(cpu_buffer, &new_head);
2365
2366 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2367 RB_PAGE_NORMAL);
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377 switch (ret) {
2378 case RB_PAGE_HEAD:
2379 case RB_PAGE_NORMAL:
2380
2381 break;
2382 default:
2383 RB_WARN_ON(cpu_buffer, 1);
2384 return -1;
2385 }
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397 if (ret == RB_PAGE_NORMAL) {
2398 struct buffer_page *buffer_tail_page;
2399
2400 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2401
2402
2403
2404
2405 if (buffer_tail_page != tail_page &&
2406 buffer_tail_page != next_page)
2407 rb_head_page_set_normal(cpu_buffer, new_head,
2408 next_page,
2409 RB_PAGE_HEAD);
2410 }
2411
2412
2413
2414
2415
2416
2417 if (type == RB_PAGE_HEAD) {
2418 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2419 tail_page,
2420 RB_PAGE_UPDATE);
2421 if (RB_WARN_ON(cpu_buffer,
2422 ret != RB_PAGE_UPDATE))
2423 return -1;
2424 }
2425
2426 return 0;
2427}
2428
2429static inline void
2430rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2431 unsigned long tail, struct rb_event_info *info)
2432{
2433 struct buffer_page *tail_page = info->tail_page;
2434 struct ring_buffer_event *event;
2435 unsigned long length = info->length;
2436
2437
2438
2439
2440
2441 if (tail >= BUF_PAGE_SIZE) {
2442
2443
2444
2445
2446
2447 if (tail == BUF_PAGE_SIZE)
2448 tail_page->real_end = 0;
2449
2450 local_sub(length, &tail_page->write);
2451 return;
2452 }
2453
2454 event = __rb_page_index(tail_page, tail);
2455
2456
2457 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2458
2459
2460
2461
2462
2463
2464 tail_page->real_end = tail;
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2478
2479
2480
2481 rb_event_set_padding(event);
2482
2483
2484 local_sub(length, &tail_page->write);
2485 return;
2486 }
2487
2488
2489 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2490 event->type_len = RINGBUF_TYPE_PADDING;
2491
2492 event->time_delta = 1;
2493
2494
2495 length = (tail + length) - BUF_PAGE_SIZE;
2496 local_sub(length, &tail_page->write);
2497}
2498
2499static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2500
2501
2502
2503
2504static noinline struct ring_buffer_event *
2505rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2506 unsigned long tail, struct rb_event_info *info)
2507{
2508 struct buffer_page *tail_page = info->tail_page;
2509 struct buffer_page *commit_page = cpu_buffer->commit_page;
2510 struct trace_buffer *buffer = cpu_buffer->buffer;
2511 struct buffer_page *next_page;
2512 int ret;
2513
2514 next_page = tail_page;
2515
2516 rb_inc_page(cpu_buffer, &next_page);
2517
2518
2519
2520
2521
2522
2523 if (unlikely(next_page == commit_page)) {
2524 local_inc(&cpu_buffer->commit_overrun);
2525 goto out_reset;
2526 }
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2543
2544
2545
2546
2547
2548 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2549
2550
2551
2552
2553 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2554 local_inc(&cpu_buffer->dropped_events);
2555 goto out_reset;
2556 }
2557
2558 ret = rb_handle_head_page(cpu_buffer,
2559 tail_page,
2560 next_page);
2561 if (ret < 0)
2562 goto out_reset;
2563 if (ret)
2564 goto out_again;
2565 } else {
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576 if (unlikely((cpu_buffer->commit_page !=
2577 cpu_buffer->tail_page) &&
2578 (cpu_buffer->commit_page ==
2579 cpu_buffer->reader_page))) {
2580 local_inc(&cpu_buffer->commit_overrun);
2581 goto out_reset;
2582 }
2583 }
2584 }
2585
2586 rb_tail_page_update(cpu_buffer, tail_page, next_page);
2587
2588 out_again:
2589
2590 rb_reset_tail(cpu_buffer, tail, info);
2591
2592
2593 rb_end_commit(cpu_buffer);
2594
2595 local_inc(&cpu_buffer->committing);
2596
2597
2598 return ERR_PTR(-EAGAIN);
2599
2600 out_reset:
2601
2602 rb_reset_tail(cpu_buffer, tail, info);
2603
2604 return NULL;
2605}
2606
2607
2608static struct ring_buffer_event *
2609rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2610{
2611 if (abs)
2612 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2613 else
2614 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2615
2616
2617 if (abs || rb_event_index(event)) {
2618 event->time_delta = delta & TS_MASK;
2619 event->array[0] = delta >> TS_SHIFT;
2620 } else {
2621
2622 event->time_delta = 0;
2623 event->array[0] = 0;
2624 }
2625
2626 return skip_time_extend(event);
2627}
2628
2629static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2630 struct ring_buffer_event *event);
2631
2632#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2633static inline bool sched_clock_stable(void)
2634{
2635 return true;
2636}
2637#endif
2638
2639static void
2640rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2641 struct rb_event_info *info)
2642{
2643 u64 write_stamp;
2644
2645 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2646 (unsigned long long)info->delta,
2647 (unsigned long long)info->ts,
2648 (unsigned long long)info->before,
2649 (unsigned long long)info->after,
2650 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2651 sched_clock_stable() ? "" :
2652 "If you just came from a suspend/resume,\n"
2653 "please switch to the trace global clock:\n"
2654 " echo global > /sys/kernel/debug/tracing/trace_clock\n"
2655 "or add trace_clock=global to the kernel command line\n");
2656}
2657
2658static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2659 struct ring_buffer_event **event,
2660 struct rb_event_info *info,
2661 u64 *delta,
2662 unsigned int *length)
2663{
2664 bool abs = info->add_timestamp &
2665 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2666
2667 if (unlikely(info->delta > (1ULL << 59))) {
2668
2669 if (info->before == info->after && info->before > info->ts) {
2670
2671 static int once;
2672
2673
2674
2675
2676
2677 if (!once) {
2678 once++;
2679 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2680 info->before, info->ts);
2681 }
2682 } else
2683 rb_check_timestamp(cpu_buffer, info);
2684 if (!abs)
2685 info->delta = 0;
2686 }
2687 *event = rb_add_time_stamp(*event, info->delta, abs);
2688 *length -= RB_LEN_TIME_EXTEND;
2689 *delta = 0;
2690}
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703static void
2704rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2705 struct ring_buffer_event *event,
2706 struct rb_event_info *info)
2707{
2708 unsigned length = info->length;
2709 u64 delta = info->delta;
2710
2711
2712
2713
2714
2715 if (unlikely(info->add_timestamp))
2716 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2717
2718 event->time_delta = delta;
2719 length -= RB_EVNT_HDR_SIZE;
2720 if (length > RB_MAX_SMALL_DATA) {
2721 event->type_len = 0;
2722 event->array[0] = length;
2723 } else
2724 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2725}
2726
2727static unsigned rb_calculate_event_length(unsigned length)
2728{
2729 struct ring_buffer_event event;
2730
2731
2732 if (!length)
2733 length++;
2734
2735 if (length > RB_MAX_SMALL_DATA)
2736 length += sizeof(event.array[0]);
2737
2738 length += RB_EVNT_HDR_SIZE;
2739 length = ALIGN(length, RB_ALIGNMENT);
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2754 length += RB_ALIGNMENT;
2755
2756 return length;
2757}
2758
2759static __always_inline bool
2760rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2761 struct ring_buffer_event *event)
2762{
2763 unsigned long addr = (unsigned long)event;
2764 unsigned long index;
2765
2766 index = rb_event_index(event);
2767 addr &= PAGE_MASK;
2768
2769 return cpu_buffer->commit_page->page == (void *)addr &&
2770 rb_commit_index(cpu_buffer) == index;
2771}
2772
2773static u64 rb_time_delta(struct ring_buffer_event *event)
2774{
2775 switch (event->type_len) {
2776 case RINGBUF_TYPE_PADDING:
2777 return 0;
2778
2779 case RINGBUF_TYPE_TIME_EXTEND:
2780 return ring_buffer_event_time_stamp(event);
2781
2782 case RINGBUF_TYPE_TIME_STAMP:
2783 return 0;
2784
2785 case RINGBUF_TYPE_DATA:
2786 return event->time_delta;
2787 default:
2788 return 0;
2789 }
2790}
2791
2792static inline int
2793rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2794 struct ring_buffer_event *event)
2795{
2796 unsigned long new_index, old_index;
2797 struct buffer_page *bpage;
2798 unsigned long index;
2799 unsigned long addr;
2800 u64 write_stamp;
2801 u64 delta;
2802
2803 new_index = rb_event_index(event);
2804 old_index = new_index + rb_event_ts_length(event);
2805 addr = (unsigned long)event;
2806 addr &= PAGE_MASK;
2807
2808 bpage = READ_ONCE(cpu_buffer->tail_page);
2809
2810 delta = rb_time_delta(event);
2811
2812 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
2813 return 0;
2814
2815
2816 barrier();
2817
2818 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2819 unsigned long write_mask =
2820 local_read(&bpage->write) & ~RB_WRITE_MASK;
2821 unsigned long event_length = rb_event_length(event);
2822
2823
2824 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
2825 write_stamp, write_stamp - delta))
2826 return 0;
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842 old_index += write_mask;
2843 new_index += write_mask;
2844 index = local_cmpxchg(&bpage->write, old_index, new_index);
2845 if (index == old_index) {
2846
2847 local_sub(event_length, &cpu_buffer->entries_bytes);
2848 return 1;
2849 }
2850 }
2851
2852
2853 return 0;
2854}
2855
2856static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2857{
2858 local_inc(&cpu_buffer->committing);
2859 local_inc(&cpu_buffer->commits);
2860}
2861
2862static __always_inline void
2863rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2864{
2865 unsigned long max_count;
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875 again:
2876 max_count = cpu_buffer->nr_pages * 100;
2877
2878 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2879 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2880 return;
2881 if (RB_WARN_ON(cpu_buffer,
2882 rb_is_reader_page(cpu_buffer->tail_page)))
2883 return;
2884 local_set(&cpu_buffer->commit_page->page->commit,
2885 rb_page_write(cpu_buffer->commit_page));
2886 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2887
2888 barrier();
2889 }
2890 while (rb_commit_index(cpu_buffer) !=
2891 rb_page_write(cpu_buffer->commit_page)) {
2892
2893 local_set(&cpu_buffer->commit_page->page->commit,
2894 rb_page_write(cpu_buffer->commit_page));
2895 RB_WARN_ON(cpu_buffer,
2896 local_read(&cpu_buffer->commit_page->page->commit) &
2897 ~RB_WRITE_MASK);
2898 barrier();
2899 }
2900
2901
2902 barrier();
2903
2904
2905
2906
2907
2908
2909 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2910 goto again;
2911}
2912
2913static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2914{
2915 unsigned long commits;
2916
2917 if (RB_WARN_ON(cpu_buffer,
2918 !local_read(&cpu_buffer->committing)))
2919 return;
2920
2921 again:
2922 commits = local_read(&cpu_buffer->commits);
2923
2924 barrier();
2925 if (local_read(&cpu_buffer->committing) == 1)
2926 rb_set_commit_to_write(cpu_buffer);
2927
2928 local_dec(&cpu_buffer->committing);
2929
2930
2931 barrier();
2932
2933
2934
2935
2936
2937
2938 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2939 !local_read(&cpu_buffer->committing)) {
2940 local_inc(&cpu_buffer->committing);
2941 goto again;
2942 }
2943}
2944
2945static inline void rb_event_discard(struct ring_buffer_event *event)
2946{
2947 if (extended_time(event))
2948 event = skip_time_extend(event);
2949
2950
2951 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2952 event->type_len = RINGBUF_TYPE_PADDING;
2953
2954 if (!event->time_delta)
2955 event->time_delta = 1;
2956}
2957
2958static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2959 struct ring_buffer_event *event)
2960{
2961 local_inc(&cpu_buffer->entries);
2962 rb_end_commit(cpu_buffer);
2963}
2964
2965static __always_inline void
2966rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2967{
2968 size_t nr_pages;
2969 size_t dirty;
2970 size_t full;
2971
2972 if (buffer->irq_work.waiters_pending) {
2973 buffer->irq_work.waiters_pending = false;
2974
2975 irq_work_queue(&buffer->irq_work.work);
2976 }
2977
2978 if (cpu_buffer->irq_work.waiters_pending) {
2979 cpu_buffer->irq_work.waiters_pending = false;
2980
2981 irq_work_queue(&cpu_buffer->irq_work.work);
2982 }
2983
2984 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
2985 return;
2986
2987 if (cpu_buffer->reader_page == cpu_buffer->commit_page)
2988 return;
2989
2990 if (!cpu_buffer->irq_work.full_waiters_pending)
2991 return;
2992
2993 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
2994
2995 full = cpu_buffer->shortest_full;
2996 nr_pages = cpu_buffer->nr_pages;
2997 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
2998 if (full && nr_pages && (dirty * 100) <= full * nr_pages)
2999 return;
3000
3001 cpu_buffer->irq_work.wakeup_full = true;
3002 cpu_buffer->irq_work.full_waiters_pending = false;
3003
3004 irq_work_queue(&cpu_buffer->irq_work.work);
3005}
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
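/*
 * Lockless recursion protection:
 *
 * A writer can be interrupted on the same CPU by a softirq, an interrupt
 * or an NMI, each of which may also write to this buffer.  Each context
 * (normal, softirq, irq, NMI) therefore owns one bit in current_context.
 * If the bit for the current context is already set, a write is already
 * in progress at this level and the new one is rejected.  The
 * cpu_buffer->nest offset lets ring_buffer_nest_start() permit one extra,
 * intentional level of nesting.
 */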
3045static __always_inline int
3046trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3047{
3048 unsigned int val = cpu_buffer->current_context;
3049 unsigned long pc = preempt_count();
3050 int bit;
3051
3052 if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
3053 bit = RB_CTX_NORMAL;
3054 else
3055 bit = pc & NMI_MASK ? RB_CTX_NMI :
3056 pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
3057
3058 if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
3059 return 1;
3060
3061 val |= (1 << (bit + cpu_buffer->nest));
3062 cpu_buffer->current_context = val;
3063
3064 return 0;
3065}
3066
3067static __always_inline void
3068trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3069{
3070 cpu_buffer->current_context &=
3071 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3072}
3073
3074
3075#define NESTED_BITS 4
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
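/**
 * ring_buffer_nest_start - allow one intended level of nested tracing
 * @buffer: The ring buffer to modify
 *
 * The recursion protection normally rejects a write that happens while
 * another write is in progress in the same context on this CPU.  Calling
 * this function (with the matching ring_buffer_nest_end()) shifts the
 * recursion bits by NESTED_BITS so that one nested
 * ring_buffer_lock_reserve()/ring_buffer_unlock_commit() pair is allowed.
 * Preemption stays disabled until ring_buffer_nest_end() is called.
 */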
3090void ring_buffer_nest_start(struct trace_buffer *buffer)
3091{
3092 struct ring_buffer_per_cpu *cpu_buffer;
3093 int cpu;
3094
3095
3096 preempt_disable_notrace();
3097 cpu = raw_smp_processor_id();
3098 cpu_buffer = buffer->buffers[cpu];
3099
3100 cpu_buffer->nest += NESTED_BITS;
3101}
3102
3103
3104
3105
3106
3107
3108
3109
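/**
 * ring_buffer_nest_end - end the allowed nested tracing section
 * @buffer: The ring buffer to modify
 *
 * Must be called after ring_buffer_nest_start() and after the nested
 * ring_buffer_unlock_commit().  Restores the recursion bits and
 * re-enables preemption.
 */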
3110void ring_buffer_nest_end(struct trace_buffer *buffer)
3111{
3112 struct ring_buffer_per_cpu *cpu_buffer;
3113 int cpu;
3114
3115
3116 cpu = raw_smp_processor_id();
3117 cpu_buffer = buffer->buffers[cpu];
3118
3119 cpu_buffer->nest -= NESTED_BITS;
3120 preempt_enable_notrace();
3121}
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
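/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer the event was reserved from
 * @event: The event to commit
 *
 * Commits the data of a previously reserved event, wakes up any waiters
 * if needed, releases the recursion protection and re-enables preemption.
 *
 * Must be paired with ring_buffer_lock_reserve().
 */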
3132int ring_buffer_unlock_commit(struct trace_buffer *buffer,
3133 struct ring_buffer_event *event)
3134{
3135 struct ring_buffer_per_cpu *cpu_buffer;
3136 int cpu = raw_smp_processor_id();
3137
3138 cpu_buffer = buffer->buffers[cpu];
3139
3140 rb_commit(cpu_buffer, event);
3141
3142 rb_wakeups(buffer, cpu_buffer);
3143
3144 trace_recursive_unlock(cpu_buffer);
3145
3146 preempt_enable_notrace();
3147
3148 return 0;
3149}
3150EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3151
3152static struct ring_buffer_event *
3153__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3154 struct rb_event_info *info)
3155{
3156 struct ring_buffer_event *event;
3157 struct buffer_page *tail_page;
3158 unsigned long tail, write, w;
3159 bool a_ok;
3160 bool b_ok;
3161
3162
3163 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3164
3165 w = local_read(&tail_page->write) & RB_WRITE_MASK;
3166 barrier();
3167 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3168 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3169 barrier();
3170 info->ts = rb_time_stamp(cpu_buffer->buffer);
3171
3172 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3173 info->delta = info->ts;
3174 } else {
3175
3176
3177
3178
3179
3180 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3181 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3182 info->length += RB_LEN_TIME_EXTEND;
3183 } else {
3184 info->delta = info->ts - info->after;
3185 if (unlikely(test_time_stamp(info->delta))) {
3186 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3187 info->length += RB_LEN_TIME_EXTEND;
3188 }
3189 }
3190 }
3191
3192 rb_time_set(&cpu_buffer->before_stamp, info->ts);
3193
3194 write = local_add_return(info->length, &tail_page->write);
3195
3196
3197 write &= RB_WRITE_MASK;
3198
3199 tail = write - info->length;
3200
3201
3202 if (unlikely(write > BUF_PAGE_SIZE)) {
3203 if (tail != w) {
3204
3205 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3206 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3207 if (a_ok && b_ok && info->before != info->after)
3208 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3209 info->before, info->after);
3210 }
3211 return rb_move_tail(cpu_buffer, tail, info);
3212 }
3213
3214 if (likely(tail == w)) {
3215 u64 save_before;
3216 bool s_ok;
3217
3218
3219 rb_time_set(&cpu_buffer->write_stamp, info->ts);
3220 barrier();
3221 s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3222 RB_WARN_ON(cpu_buffer, !s_ok);
3223 if (likely(!(info->add_timestamp &
3224 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3225
3226 info->delta = info->ts - info->after;
3227 else
3228
3229 info->delta = info->ts;
3230 barrier();
3231 if (unlikely(info->ts != save_before)) {
3232
3233
3234 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3235 RB_WARN_ON(cpu_buffer, !a_ok);
3236
3237
3238 if (save_before > info->after) {
3239
3240
3241
3242
3243 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3244 info->after, save_before);
3245 }
3246 }
3247 } else {
3248 u64 ts;
3249
3250 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3251
3252 RB_WARN_ON(cpu_buffer, !a_ok);
3253 ts = rb_time_stamp(cpu_buffer->buffer);
3254 barrier();
3255 if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3256 info->after < ts) {
3257
3258 info->delta = ts - info->after;
3259 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3260 info->after, info->ts);
3261 info->ts = ts;
3262 } else {
3263
3264
3265
3266
3267
3268
3269
3270
3271 info->delta = 0;
3272 }
3273 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3274 }
3275
3276
3277
3278
3279
3280 if (unlikely(!tail && !(info->add_timestamp &
3281 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3282 info->delta = 0;
3283
3284
3285
3286 event = __rb_page_index(tail_page, tail);
3287 rb_update_event(cpu_buffer, event, info);
3288
3289 local_inc(&tail_page->entries);
3290
3291
3292
3293
3294
3295 if (unlikely(!tail))
3296 tail_page->page->time_stamp = info->ts;
3297
3298
3299 local_add(info->length, &cpu_buffer->entries_bytes);
3300
3301 return event;
3302}
3303
3304static __always_inline struct ring_buffer_event *
3305rb_reserve_next_event(struct trace_buffer *buffer,
3306 struct ring_buffer_per_cpu *cpu_buffer,
3307 unsigned long length)
3308{
3309 struct ring_buffer_event *event;
3310 struct rb_event_info info;
3311 int nr_loops = 0;
3312 int add_ts_default;
3313
3314 rb_start_commit(cpu_buffer);
3315
3316
3317#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3318
3319
3320
3321
3322
3323
3324 barrier();
3325 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3326 local_dec(&cpu_buffer->committing);
3327 local_dec(&cpu_buffer->commits);
3328 return NULL;
3329 }
3330#endif
3331
3332 info.length = rb_calculate_event_length(length);
3333
3334 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3335 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3336 info.length += RB_LEN_TIME_EXTEND;
3337 } else {
3338 add_ts_default = RB_ADD_STAMP_NONE;
3339 }
3340
3341 again:
3342 info.add_timestamp = add_ts_default;
3343 info.delta = 0;
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3355 goto out_fail;
3356
3357 event = __rb_reserve_next(cpu_buffer, &info);
3358
3359 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3360 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3361 info.length -= RB_LEN_TIME_EXTEND;
3362 goto again;
3363 }
3364
3365 if (likely(event))
3366 return event;
3367 out_fail:
3368 rb_end_commit(cpu_buffer);
3369 return NULL;
3370}
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
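/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding the event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to, or
 * NULL if recording is disabled, the length is too big, or the write
 * would recurse.  Use ring_buffer_event_data() to get the body to write
 * into.
 *
 * Must be paired with ring_buffer_unlock_commit(), unless NULL is
 * returned, in which case nothing was reserved or locked.
 */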
3387struct ring_buffer_event *
3388ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3389{
3390 struct ring_buffer_per_cpu *cpu_buffer;
3391 struct ring_buffer_event *event;
3392 int cpu;
3393
3394
3395 preempt_disable_notrace();
3396
3397 if (unlikely(atomic_read(&buffer->record_disabled)))
3398 goto out;
3399
3400 cpu = raw_smp_processor_id();
3401
3402 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3403 goto out;
3404
3405 cpu_buffer = buffer->buffers[cpu];
3406
3407 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3408 goto out;
3409
3410 if (unlikely(length > BUF_MAX_DATA_SIZE))
3411 goto out;
3412
3413 if (unlikely(trace_recursive_lock(cpu_buffer)))
3414 goto out;
3415
3416 event = rb_reserve_next_event(buffer, cpu_buffer, length);
3417 if (!event)
3418 goto out_unlock;
3419
3420 return event;
3421
3422 out_unlock:
3423 trace_recursive_unlock(cpu_buffer);
3424 out:
3425 preempt_enable_notrace();
3426 return NULL;
3427}
3428EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
3429
3430
3431
3432
3433
3434
3435
3436static inline void
3437rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3438 struct ring_buffer_event *event)
3439{
3440 unsigned long addr = (unsigned long)event;
3441 struct buffer_page *bpage = cpu_buffer->commit_page;
3442 struct buffer_page *start;
3443
3444 addr &= PAGE_MASK;
3445
3446
3447 if (likely(bpage->page == (void *)addr)) {
3448 local_dec(&bpage->entries);
3449 return;
3450 }
3451
3452
3453
3454
3455
3456 rb_inc_page(cpu_buffer, &bpage);
3457 start = bpage;
3458 do {
3459 if (bpage->page == (void *)addr) {
3460 local_dec(&bpage->entries);
3461 return;
3462 }
3463 rb_inc_page(cpu_buffer, &bpage);
3464 } while (bpage != start);
3465
3466
3467 RB_WARN_ON(cpu_buffer, 1);
3468}
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
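/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: the non-committed event to discard
 *
 * This only works if it is called before the event has been committed.
 * If no other event was written behind it, the space is given back to
 * the buffer; otherwise the event is converted to padding and committed.
 *
 * Do not call ring_buffer_unlock_commit() on an event passed to this
 * function: the commit is finished here.
 */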
3489void ring_buffer_discard_commit(struct trace_buffer *buffer,
3490 struct ring_buffer_event *event)
3491{
3492 struct ring_buffer_per_cpu *cpu_buffer;
3493 int cpu;
3494
3495
3496 rb_event_discard(event);
3497
3498 cpu = smp_processor_id();
3499 cpu_buffer = buffer->buffers[cpu];
3500
3501
3502
3503
3504
3505
3506 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3507
3508 rb_decrement_entry(cpu_buffer, event);
3509 if (rb_try_to_discard(cpu_buffer, event))
3510 goto out;
3511
3512 out:
3513 rb_end_commit(cpu_buffer);
3514
3515 trace_recursive_unlock(cpu_buffer);
3516
3517 preempt_enable_notrace();
3518
3519}
3520EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
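/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer
 *
 * This is the combination of ring_buffer_lock_reserve() and
 * ring_buffer_unlock_commit() in one call: the data is copied into the
 * reserved event and committed immediately.
 *
 * Returns 0 on success, -EBUSY if the event could not be written.
 */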
3535int ring_buffer_write(struct trace_buffer *buffer,
3536 unsigned long length,
3537 void *data)
3538{
3539 struct ring_buffer_per_cpu *cpu_buffer;
3540 struct ring_buffer_event *event;
3541 void *body;
3542 int ret = -EBUSY;
3543 int cpu;
3544
3545 preempt_disable_notrace();
3546
3547 if (atomic_read(&buffer->record_disabled))
3548 goto out;
3549
3550 cpu = raw_smp_processor_id();
3551
3552 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3553 goto out;
3554
3555 cpu_buffer = buffer->buffers[cpu];
3556
3557 if (atomic_read(&cpu_buffer->record_disabled))
3558 goto out;
3559
3560 if (length > BUF_MAX_DATA_SIZE)
3561 goto out;
3562
3563 if (unlikely(trace_recursive_lock(cpu_buffer)))
3564 goto out;
3565
3566 event = rb_reserve_next_event(buffer, cpu_buffer, length);
3567 if (!event)
3568 goto out_unlock;
3569
3570 body = rb_event_data(event);
3571
3572 memcpy(body, data, length);
3573
3574 rb_commit(cpu_buffer, event);
3575
3576 rb_wakeups(buffer, cpu_buffer);
3577
3578 ret = 0;
3579
3580 out_unlock:
3581 trace_recursive_unlock(cpu_buffer);
3582
3583 out:
3584 preempt_enable_notrace();
3585
3586 return ret;
3587}
3588EXPORT_SYMBOL_GPL(ring_buffer_write);
3589
3590static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3591{
3592 struct buffer_page *reader = cpu_buffer->reader_page;
3593 struct buffer_page *head = rb_set_head_page(cpu_buffer);
3594 struct buffer_page *commit = cpu_buffer->commit_page;
3595
3596
3597 if (unlikely(!head))
3598 return true;
3599
3600 return reader->read == rb_page_commit(reader) &&
3601 (commit == reader ||
3602 (commit == head &&
3603 head->read == rb_page_commit(commit)));
3604}
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
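/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to
 *
 * This prevents all writes to the buffer.  Any attempt to write after
 * this will fail and return NULL.  The disable is counted, so it must
 * be paired with ring_buffer_record_enable().
 */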
3615void ring_buffer_record_disable(struct trace_buffer *buffer)
3616{
3617 atomic_inc(&buffer->record_disabled);
3618}
3619EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3620
3621
3622
3623
3624
3625
3626
3627
3628void ring_buffer_record_enable(struct trace_buffer *buffer)
3629{
3630 atomic_dec(&buffer->record_disabled);
3631}
3632EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
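/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to
 *
 * Unlike ring_buffer_record_disable(), this is not a counter: it sets
 * the RB_BUFFER_OFF flag and recording stays off until
 * ring_buffer_record_on() clears it.
 */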
3645void ring_buffer_record_off(struct trace_buffer *buffer)
3646{
3647 unsigned int rd;
3648 unsigned int new_rd;
3649
3650 do {
3651 rd = atomic_read(&buffer->record_disabled);
3652 new_rd = rd | RB_BUFFER_OFF;
3653 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3654}
3655EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
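/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to
 *
 * Clears the RB_BUFFER_OFF flag set by ring_buffer_record_off().
 * It does not undo a ring_buffer_record_disable() count.
 */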
3668void ring_buffer_record_on(struct trace_buffer *buffer)
3669{
3670 unsigned int rd;
3671 unsigned int new_rd;
3672
3673 do {
3674 rd = atomic_read(&buffer->record_disabled);
3675 new_rd = rd & ~RB_BUFFER_OFF;
3676 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3677}
3678EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3679
3680
3681
3682
3683
3684
3685
3686bool ring_buffer_record_is_on(struct trace_buffer *buffer)
3687{
3688 return !atomic_read(&buffer->record_disabled);
3689}
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
3703{
3704 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3705}
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
3718{
3719 struct ring_buffer_per_cpu *cpu_buffer;
3720
3721 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3722 return;
3723
3724 cpu_buffer = buffer->buffers[cpu];
3725 atomic_inc(&cpu_buffer->record_disabled);
3726}
3727EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
3738{
3739 struct ring_buffer_per_cpu *cpu_buffer;
3740
3741 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3742 return;
3743
3744 cpu_buffer = buffer->buffers[cpu];
3745 atomic_dec(&cpu_buffer->record_disabled);
3746}
3747EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3748
3749
3750
3751
3752
3753
3754
3755static inline unsigned long
3756rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3757{
3758 return local_read(&cpu_buffer->entries) -
3759 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3760}
3761
3762
3763
3764
3765
3766
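/**
 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
 * @buffer: The ring buffer
 * @cpu: The per-CPU buffer to read from
 *
 * Returns the time stamp of the oldest page still holding data, or 0 if
 * @cpu is not part of the buffer.
 */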
3767u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
3768{
3769 unsigned long flags;
3770 struct ring_buffer_per_cpu *cpu_buffer;
3771 struct buffer_page *bpage;
3772 u64 ret = 0;
3773
3774 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3775 return 0;
3776
3777 cpu_buffer = buffer->buffers[cpu];
3778 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3779
3780
3781
3782
3783 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3784 bpage = cpu_buffer->reader_page;
3785 else
3786 bpage = rb_set_head_page(cpu_buffer);
3787 if (bpage)
3788 ret = bpage->page->time_stamp;
3789 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3790
3791 return ret;
3792}
3793EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3794
3795
3796
3797
3798
3799
3800unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
3801{
3802 struct ring_buffer_per_cpu *cpu_buffer;
3803 unsigned long ret;
3804
3805 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3806 return 0;
3807
3808 cpu_buffer = buffer->buffers[cpu];
3809 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3810
3811 return ret;
3812}
3813EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3814
3815
3816
3817
3818
3819
3820unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
3821{
3822 struct ring_buffer_per_cpu *cpu_buffer;
3823
3824 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3825 return 0;
3826
3827 cpu_buffer = buffer->buffers[cpu];
3828
3829 return rb_num_of_entries(cpu_buffer);
3830}
3831EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3832
3833
3834
3835
3836
3837
3838
3839unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
3840{
3841 struct ring_buffer_per_cpu *cpu_buffer;
3842 unsigned long ret;
3843
3844 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3845 return 0;
3846
3847 cpu_buffer = buffer->buffers[cpu];
3848 ret = local_read(&cpu_buffer->overrun);
3849
3850 return ret;
3851}
3852EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3853
3854
3855
3856
3857
3858
3859
3860
3861unsigned long
3862ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
3863{
3864 struct ring_buffer_per_cpu *cpu_buffer;
3865 unsigned long ret;
3866
3867 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3868 return 0;
3869
3870 cpu_buffer = buffer->buffers[cpu];
3871 ret = local_read(&cpu_buffer->commit_overrun);
3872
3873 return ret;
3874}
3875EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3876
3877
3878
3879
3880
3881
3882
3883unsigned long
3884ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
3885{
3886 struct ring_buffer_per_cpu *cpu_buffer;
3887 unsigned long ret;
3888
3889 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3890 return 0;
3891
3892 cpu_buffer = buffer->buffers[cpu];
3893 ret = local_read(&cpu_buffer->dropped_events);
3894
3895 return ret;
3896}
3897EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3898
3899
3900
3901
3902
3903
3904unsigned long
3905ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
3906{
3907 struct ring_buffer_per_cpu *cpu_buffer;
3908
3909 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3910 return 0;
3911
3912 cpu_buffer = buffer->buffers[cpu];
3913 return cpu_buffer->read;
3914}
3915EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3916
3917
3918
3919
3920
3921
3922
3923
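/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of unread entries in the ring buffer,
 * summed over all CPUs.
 */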
3924unsigned long ring_buffer_entries(struct trace_buffer *buffer)
3925{
3926 struct ring_buffer_per_cpu *cpu_buffer;
3927 unsigned long entries = 0;
3928 int cpu;
3929
3930
3931 for_each_buffer_cpu(buffer, cpu) {
3932 cpu_buffer = buffer->buffers[cpu];
3933 entries += rb_num_of_entries(cpu_buffer);
3934 }
3935
3936 return entries;
3937}
3938EXPORT_SYMBOL_GPL(ring_buffer_entries);
3939
3940
3941
3942
3943
3944
3945
3946
3947unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
3948{
3949 struct ring_buffer_per_cpu *cpu_buffer;
3950 unsigned long overruns = 0;
3951 int cpu;
3952
3953
3954 for_each_buffer_cpu(buffer, cpu) {
3955 cpu_buffer = buffer->buffers[cpu];
3956 overruns += local_read(&cpu_buffer->overrun);
3957 }
3958
3959 return overruns;
3960}
3961EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3962
3963static void rb_iter_reset(struct ring_buffer_iter *iter)
3964{
3965 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3966
3967
3968 iter->head_page = cpu_buffer->reader_page;
3969 iter->head = cpu_buffer->reader_page->read;
3970 iter->next_event = iter->head;
3971
3972 iter->cache_reader_page = iter->head_page;
3973 iter->cache_read = cpu_buffer->read;
3974
3975 if (iter->head) {
3976 iter->read_stamp = cpu_buffer->read_stamp;
3977 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
3978 } else {
3979 iter->read_stamp = iter->head_page->page->time_stamp;
3980 iter->page_stamp = iter->read_stamp;
3981 }
3982}
3983
3984
3985
3986
3987
3988
3989
3990
3991void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3992{
3993 struct ring_buffer_per_cpu *cpu_buffer;
3994 unsigned long flags;
3995
3996 if (!iter)
3997 return;
3998
3999 cpu_buffer = iter->cpu_buffer;
4000
4001 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4002 rb_iter_reset(iter);
4003 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4004}
4005EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4006
4007
4008
4009
4010
4011int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4012{
4013 struct ring_buffer_per_cpu *cpu_buffer;
4014 struct buffer_page *reader;
4015 struct buffer_page *head_page;
4016 struct buffer_page *commit_page;
4017 struct buffer_page *curr_commit_page;
4018 unsigned commit;
4019 u64 curr_commit_ts;
4020 u64 commit_ts;
4021
4022 cpu_buffer = iter->cpu_buffer;
4023 reader = cpu_buffer->reader_page;
4024 head_page = cpu_buffer->head_page;
4025 commit_page = cpu_buffer->commit_page;
4026 commit_ts = commit_page->page->time_stamp;
4027
4028
4029
4030
4031
4032
4033 smp_rmb();
4034 commit = rb_page_commit(commit_page);
4035
4036 smp_rmb();
4037
4038
4039 curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4040 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4041
4042
4043 if (curr_commit_page != commit_page ||
4044 curr_commit_ts != commit_ts)
4045 return 0;
4046
4047
4048 return ((iter->head_page == commit_page && iter->head >= commit) ||
4049 (iter->head_page == reader && commit_page == head_page &&
4050 head_page->read == commit &&
4051 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4052}
4053EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4054
4055static void
4056rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4057 struct ring_buffer_event *event)
4058{
4059 u64 delta;
4060
4061 switch (event->type_len) {
4062 case RINGBUF_TYPE_PADDING:
4063 return;
4064
4065 case RINGBUF_TYPE_TIME_EXTEND:
4066 delta = ring_buffer_event_time_stamp(event);
4067 cpu_buffer->read_stamp += delta;
4068 return;
4069
4070 case RINGBUF_TYPE_TIME_STAMP:
4071 delta = ring_buffer_event_time_stamp(event);
4072 cpu_buffer->read_stamp = delta;
4073 return;
4074
4075 case RINGBUF_TYPE_DATA:
4076 cpu_buffer->read_stamp += event->time_delta;
4077 return;
4078
4079 default:
4080 RB_WARN_ON(cpu_buffer, 1);
4081 }
4082 return;
4083}
4084
4085static void
4086rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4087 struct ring_buffer_event *event)
4088{
4089 u64 delta;
4090
4091 switch (event->type_len) {
4092 case RINGBUF_TYPE_PADDING:
4093 return;
4094
4095 case RINGBUF_TYPE_TIME_EXTEND:
4096 delta = ring_buffer_event_time_stamp(event);
4097 iter->read_stamp += delta;
4098 return;
4099
4100 case RINGBUF_TYPE_TIME_STAMP:
4101 delta = ring_buffer_event_time_stamp(event);
4102 iter->read_stamp = delta;
4103 return;
4104
4105 case RINGBUF_TYPE_DATA:
4106 iter->read_stamp += event->time_delta;
4107 return;
4108
4109 default:
4110 RB_WARN_ON(iter->cpu_buffer, 1);
4111 }
4112 return;
4113}
4114
4115static struct buffer_page *
4116rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4117{
4118 struct buffer_page *reader = NULL;
4119 unsigned long overwrite;
4120 unsigned long flags;
4121 int nr_loops = 0;
4122 int ret;
4123
4124 local_irq_save(flags);
4125 arch_spin_lock(&cpu_buffer->lock);
4126
4127 again:
4128
4129
4130
4131
4132
4133
4134 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4135 reader = NULL;
4136 goto out;
4137 }
4138
4139 reader = cpu_buffer->reader_page;
4140
4141
4142 if (cpu_buffer->reader_page->read < rb_page_size(reader))
4143 goto out;
4144
4145
4146 if (RB_WARN_ON(cpu_buffer,
4147 cpu_buffer->reader_page->read > rb_page_size(reader)))
4148 goto out;
4149
4150
4151 reader = NULL;
4152 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4153 goto out;
4154
4155
4156 if (rb_num_of_entries(cpu_buffer) == 0)
4157 goto out;
4158
4159
4160
4161
4162 local_set(&cpu_buffer->reader_page->write, 0);
4163 local_set(&cpu_buffer->reader_page->entries, 0);
4164 local_set(&cpu_buffer->reader_page->page->commit, 0);
4165 cpu_buffer->reader_page->real_end = 0;
4166
4167 spin:
4168
4169
4170
4171 reader = rb_set_head_page(cpu_buffer);
4172 if (!reader)
4173 goto out;
4174 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4175 cpu_buffer->reader_page->list.prev = reader->list.prev;
4176
4177
4178
4179
4180
4181
4182 cpu_buffer->pages = reader->list.prev;
4183
4184
4185 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196 smp_mb();
4197 overwrite = local_read(&(cpu_buffer->overrun));
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4211
4212
4213
4214
4215 if (!ret)
4216 goto spin;
4217
4218
4219
4220
4221
4222
4223 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4224 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
4225
4226 local_inc(&cpu_buffer->pages_read);
4227
4228
4229 cpu_buffer->reader_page = reader;
4230 cpu_buffer->reader_page->read = 0;
4231
4232 if (overwrite != cpu_buffer->last_overrun) {
4233 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4234 cpu_buffer->last_overrun = overwrite;
4235 }
4236
4237 goto again;
4238
4239 out:
4240
4241 if (reader && reader->read == 0)
4242 cpu_buffer->read_stamp = reader->page->time_stamp;
4243
4244 arch_spin_unlock(&cpu_buffer->lock);
4245 local_irq_restore(flags);
4246
4247 return reader;
4248}
4249
4250static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4251{
4252 struct ring_buffer_event *event;
4253 struct buffer_page *reader;
4254 unsigned length;
4255
4256 reader = rb_get_reader_page(cpu_buffer);
4257
4258
4259 if (RB_WARN_ON(cpu_buffer, !reader))
4260 return;
4261
4262 event = rb_reader_event(cpu_buffer);
4263
4264 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4265 cpu_buffer->read++;
4266
4267 rb_update_read_stamp(cpu_buffer, event);
4268
4269 length = rb_event_length(event);
4270 cpu_buffer->reader_page->read += length;
4271}
4272
4273static void rb_advance_iter(struct ring_buffer_iter *iter)
4274{
4275 struct ring_buffer_per_cpu *cpu_buffer;
4276
4277 cpu_buffer = iter->cpu_buffer;
4278
4279
4280 if (iter->head == iter->next_event) {
4281
4282 if (rb_iter_head_event(iter) == NULL)
4283 return;
4284 }
4285
4286 iter->head = iter->next_event;
4287
4288
4289
4290
4291 if (iter->next_event >= rb_page_size(iter->head_page)) {
4292
4293 if (iter->head_page == cpu_buffer->commit_page)
4294 return;
4295 rb_inc_iter(iter);
4296 return;
4297 }
4298
4299 rb_update_iter_read_stamp(iter, iter->event);
4300}
4301
4302static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4303{
4304 return cpu_buffer->lost_events;
4305}
4306
4307static struct ring_buffer_event *
4308rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4309 unsigned long *lost_events)
4310{
4311 struct ring_buffer_event *event;
4312 struct buffer_page *reader;
4313 int nr_loops = 0;
4314
4315 if (ts)
4316 *ts = 0;
4317 again:
4318
4319
4320
4321
4322
4323
4324 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4325 return NULL;
4326
4327 reader = rb_get_reader_page(cpu_buffer);
4328 if (!reader)
4329 return NULL;
4330
4331 event = rb_reader_event(cpu_buffer);
4332
4333 switch (event->type_len) {
4334 case RINGBUF_TYPE_PADDING:
4335 if (rb_null_event(event))
4336 RB_WARN_ON(cpu_buffer, 1);
4337
4338
4339
4340
4341
4342
4343
4344
4345 return event;
4346
4347 case RINGBUF_TYPE_TIME_EXTEND:
4348
4349 rb_advance_reader(cpu_buffer);
4350 goto again;
4351
4352 case RINGBUF_TYPE_TIME_STAMP:
4353 if (ts) {
4354 *ts = ring_buffer_event_time_stamp(event);
4355 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4356 cpu_buffer->cpu, ts);
4357 }
4358
4359 rb_advance_reader(cpu_buffer);
4360 goto again;
4361
4362 case RINGBUF_TYPE_DATA:
4363 if (ts && !(*ts)) {
4364 *ts = cpu_buffer->read_stamp + event->time_delta;
4365 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4366 cpu_buffer->cpu, ts);
4367 }
4368 if (lost_events)
4369 *lost_events = rb_lost_events(cpu_buffer);
4370 return event;
4371
4372 default:
4373 RB_WARN_ON(cpu_buffer, 1);
4374 }
4375
4376 return NULL;
4377}
4378EXPORT_SYMBOL_GPL(ring_buffer_peek);
4379
4380static struct ring_buffer_event *
4381rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4382{
4383 struct trace_buffer *buffer;
4384 struct ring_buffer_per_cpu *cpu_buffer;
4385 struct ring_buffer_event *event;
4386 int nr_loops = 0;
4387
4388 if (ts)
4389 *ts = 0;
4390
4391 cpu_buffer = iter->cpu_buffer;
4392 buffer = cpu_buffer->buffer;
4393
4394
4395
4396
4397
4398
4399 if (unlikely(iter->cache_read != cpu_buffer->read ||
4400 iter->cache_reader_page != cpu_buffer->reader_page))
4401 rb_iter_reset(iter);
4402
4403 again:
4404 if (ring_buffer_iter_empty(iter))
4405 return NULL;
4406
4407
4408
4409
4410
4411
4412
4413
4414 if (++nr_loops > 3)
4415 return NULL;
4416
4417 if (rb_per_cpu_empty(cpu_buffer))
4418 return NULL;
4419
4420 if (iter->head >= rb_page_size(iter->head_page)) {
4421 rb_inc_iter(iter);
4422 goto again;
4423 }
4424
4425 event = rb_iter_head_event(iter);
4426 if (!event)
4427 goto again;
4428
4429 switch (event->type_len) {
4430 case RINGBUF_TYPE_PADDING:
4431 if (rb_null_event(event)) {
4432 rb_inc_iter(iter);
4433 goto again;
4434 }
4435 rb_advance_iter(iter);
4436 return event;
4437
4438 case RINGBUF_TYPE_TIME_EXTEND:
4439
4440 rb_advance_iter(iter);
4441 goto again;
4442
4443 case RINGBUF_TYPE_TIME_STAMP:
4444 if (ts) {
4445 *ts = ring_buffer_event_time_stamp(event);
4446 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4447 cpu_buffer->cpu, ts);
4448 }
4449
4450 rb_advance_iter(iter);
4451 goto again;
4452
4453 case RINGBUF_TYPE_DATA:
4454 if (ts && !(*ts)) {
4455 *ts = iter->read_stamp + event->time_delta;
4456 ring_buffer_normalize_time_stamp(buffer,
4457 cpu_buffer->cpu, ts);
4458 }
4459 return event;
4460
4461 default:
4462 RB_WARN_ON(cpu_buffer, 1);
4463 }
4464
4465 return NULL;
4466}
4467EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4468
4469static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4470{
4471 if (likely(!in_nmi())) {
4472 raw_spin_lock(&cpu_buffer->reader_lock);
4473 return true;
4474 }
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4486 return true;
4487
4488
4489 atomic_inc(&cpu_buffer->record_disabled);
4490 return false;
4491}
4492
4493static inline void
4494rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4495{
4496 if (likely(locked))
4497 raw_spin_unlock(&cpu_buffer->reader_lock);
4498 return;
4499}
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
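/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: Where to store the timestamp of the event (may be NULL)
 * @lost_events: Where to store the count of lost events (may be NULL)
 *
 * Returns the event that will be read next without consuming it,
 * or NULL if the per-CPU buffer is empty.
 */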
4511struct ring_buffer_event *
4512ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4513 unsigned long *lost_events)
4514{
4515 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4516 struct ring_buffer_event *event;
4517 unsigned long flags;
4518 bool dolock;
4519
4520 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4521 return NULL;
4522
4523 again:
4524 local_irq_save(flags);
4525 dolock = rb_reader_lock(cpu_buffer);
4526 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4527 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4528 rb_advance_reader(cpu_buffer);
4529 rb_reader_unlock(cpu_buffer, dolock);
4530 local_irq_restore(flags);
4531
4532 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4533 goto again;
4534
4535 return event;
4536}
4537
4538
4539
4540
4541
4542
4543bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4544{
4545 bool ret = iter->missed_events != 0;
4546
4547 iter->missed_events = 0;
4548 return ret;
4549}
4550EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560struct ring_buffer_event *
4561ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4562{
4563 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4564 struct ring_buffer_event *event;
4565 unsigned long flags;
4566
4567 again:
4568 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4569 event = rb_iter_peek(iter, ts);
4570 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4571
4572 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4573 goto again;
4574
4575 return event;
4576}
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
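/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu to read the buffer from
 * @ts: Where to store the timestamp of the event (may be NULL)
 * @lost_events: Where to store the count of lost events (may be NULL)
 *
 * Returns the next event in the ring buffer and consumes it, so that
 * sequential calls keep returning new events until the buffer is empty.
 */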
4589struct ring_buffer_event *
4590ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
4591 unsigned long *lost_events)
4592{
4593 struct ring_buffer_per_cpu *cpu_buffer;
4594 struct ring_buffer_event *event = NULL;
4595 unsigned long flags;
4596 bool dolock;
4597
4598 again:
4599
4600 preempt_disable();
4601
4602 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4603 goto out;
4604
4605 cpu_buffer = buffer->buffers[cpu];
4606 local_irq_save(flags);
4607 dolock = rb_reader_lock(cpu_buffer);
4608
4609 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4610 if (event) {
4611 cpu_buffer->lost_events = 0;
4612 rb_advance_reader(cpu_buffer);
4613 }
4614
4615 rb_reader_unlock(cpu_buffer, dolock);
4616 local_irq_restore(flags);
4617
4618 out:
4619 preempt_enable();
4620
4621 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4622 goto again;
4623
4624 return event;
4625}
4626EXPORT_SYMBOL_GPL(ring_buffer_consume);
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
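/**
 * ring_buffer_read_prepare - prepare for a non-consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 * @flags: gfp flags to use for memory allocation
 *
 * Allocates the iterator and disables resizing of the cpu buffer.
 * After preparing one or more iterators, call
 * ring_buffer_read_prepare_sync() once, then ring_buffer_read_start()
 * on each iterator.  The whole sequence must be paired with
 * ring_buffer_read_finish().
 */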
4649struct ring_buffer_iter *
4650ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
4651{
4652 struct ring_buffer_per_cpu *cpu_buffer;
4653 struct ring_buffer_iter *iter;
4654
4655 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4656 return NULL;
4657
4658 iter = kzalloc(sizeof(*iter), flags);
4659 if (!iter)
4660 return NULL;
4661
4662 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
4663 if (!iter->event) {
4664 kfree(iter);
4665 return NULL;
4666 }
4667
4668 cpu_buffer = buffer->buffers[cpu];
4669
4670 iter->cpu_buffer = cpu_buffer;
4671
4672 atomic_inc(&cpu_buffer->resize_disabled);
4673
4674 return iter;
4675}
4676EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4677
4678
4679
4680
4681
4682
4683
4684
4685void
4686ring_buffer_read_prepare_sync(void)
4687{
4688 synchronize_rcu();
4689}
4690EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703void
4704ring_buffer_read_start(struct ring_buffer_iter *iter)
4705{
4706 struct ring_buffer_per_cpu *cpu_buffer;
4707 unsigned long flags;
4708
4709 if (!iter)
4710 return;
4711
4712 cpu_buffer = iter->cpu_buffer;
4713
4714 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4715 arch_spin_lock(&cpu_buffer->lock);
4716 rb_iter_reset(iter);
4717 arch_spin_unlock(&cpu_buffer->lock);
4718 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4719}
4720EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4721
4722
4723
4724
4725
4726
4727
4728
4729void
4730ring_buffer_read_finish(struct ring_buffer_iter *iter)
4731{
4732 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4733 unsigned long flags;
4734
4735
4736
4737
4738
4739
4740
4741 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4742 rb_check_pages(cpu_buffer);
4743 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4744
4745 atomic_dec(&cpu_buffer->resize_disabled);
4746 kfree(iter->event);
4747 kfree(iter);
4748}
4749EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4750
4751
4752
4753
4754
4755
4756
4757
4758void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
4759{
4760 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4761 unsigned long flags;
4762
4763 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4764
4765 rb_advance_iter(iter);
4766
4767 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4768}
4769EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
4770
4771
4772
4773
4774
4775
4776unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
4777{
4778
4779
4780
4781
4782
4783
4784 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4785 return 0;
4786
4787 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4788}
4789EXPORT_SYMBOL_GPL(ring_buffer_size);
4790
4791static void
4792rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4793{
4794 rb_head_page_deactivate(cpu_buffer);
4795
4796 cpu_buffer->head_page
4797 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4798 local_set(&cpu_buffer->head_page->write, 0);
4799 local_set(&cpu_buffer->head_page->entries, 0);
4800 local_set(&cpu_buffer->head_page->page->commit, 0);
4801
4802 cpu_buffer->head_page->read = 0;
4803
4804 cpu_buffer->tail_page = cpu_buffer->head_page;
4805 cpu_buffer->commit_page = cpu_buffer->head_page;
4806
4807 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4808 INIT_LIST_HEAD(&cpu_buffer->new_pages);
4809 local_set(&cpu_buffer->reader_page->write, 0);
4810 local_set(&cpu_buffer->reader_page->entries, 0);
4811 local_set(&cpu_buffer->reader_page->page->commit, 0);
4812 cpu_buffer->reader_page->read = 0;
4813
4814 local_set(&cpu_buffer->entries_bytes, 0);
4815 local_set(&cpu_buffer->overrun, 0);
4816 local_set(&cpu_buffer->commit_overrun, 0);
4817 local_set(&cpu_buffer->dropped_events, 0);
4818 local_set(&cpu_buffer->entries, 0);
4819 local_set(&cpu_buffer->committing, 0);
4820 local_set(&cpu_buffer->commits, 0);
4821 local_set(&cpu_buffer->pages_touched, 0);
4822 local_set(&cpu_buffer->pages_read, 0);
4823 cpu_buffer->last_pages_touch = 0;
4824 cpu_buffer->shortest_full = 0;
4825 cpu_buffer->read = 0;
4826 cpu_buffer->read_bytes = 0;
4827
4828 rb_time_set(&cpu_buffer->write_stamp, 0);
4829 rb_time_set(&cpu_buffer->before_stamp, 0);
4830
4831 cpu_buffer->lost_events = 0;
4832 cpu_buffer->last_overrun = 0;
4833
4834 rb_head_page_activate(cpu_buffer);
4835}
4836
4837
4838static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
4839{
4840 unsigned long flags;
4841
4842 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4843
4844 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4845 goto out;
4846
4847 arch_spin_lock(&cpu_buffer->lock);
4848
4849 rb_reset_cpu(cpu_buffer);
4850
4851 arch_spin_unlock(&cpu_buffer->lock);
4852
4853 out:
4854 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4855}
4856
4857
4858
4859
4860
4861
4862void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
4863{
4864 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4865
4866 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4867 return;
4868
4869 atomic_inc(&cpu_buffer->resize_disabled);
4870 atomic_inc(&cpu_buffer->record_disabled);
4871
4872
4873 synchronize_rcu();
4874
4875 reset_disabled_cpu_buffer(cpu_buffer);
4876
4877 atomic_dec(&cpu_buffer->record_disabled);
4878 atomic_dec(&cpu_buffer->resize_disabled);
4879}
4880EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4881
4882
4883
4884
4885
4886
4887void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
4888{
4889 struct ring_buffer_per_cpu *cpu_buffer;
4890 int cpu;
4891
4892 for_each_online_buffer_cpu(buffer, cpu) {
4893 cpu_buffer = buffer->buffers[cpu];
4894
4895 atomic_inc(&cpu_buffer->resize_disabled);
4896 atomic_inc(&cpu_buffer->record_disabled);
4897 }
4898
4899
4900 synchronize_rcu();
4901
4902 for_each_online_buffer_cpu(buffer, cpu) {
4903 cpu_buffer = buffer->buffers[cpu];
4904
4905 reset_disabled_cpu_buffer(cpu_buffer);
4906
4907 atomic_dec(&cpu_buffer->record_disabled);
4908 atomic_dec(&cpu_buffer->resize_disabled);
4909 }
4910}
4911
4912
4913
4914
4915
4916void ring_buffer_reset(struct trace_buffer *buffer)
4917{
4918 struct ring_buffer_per_cpu *cpu_buffer;
4919 int cpu;
4920
4921 for_each_buffer_cpu(buffer, cpu) {
4922 cpu_buffer = buffer->buffers[cpu];
4923
4924 atomic_inc(&cpu_buffer->resize_disabled);
4925 atomic_inc(&cpu_buffer->record_disabled);
4926 }
4927
4928
4929 synchronize_rcu();
4930
4931 for_each_buffer_cpu(buffer, cpu) {
4932 cpu_buffer = buffer->buffers[cpu];
4933
4934 reset_disabled_cpu_buffer(cpu_buffer);
4935
4936 atomic_dec(&cpu_buffer->record_disabled);
4937 atomic_dec(&cpu_buffer->resize_disabled);
4938 }
4939}
4940EXPORT_SYMBOL_GPL(ring_buffer_reset);
4941
4942
4943
4944
4945
4946bool ring_buffer_empty(struct trace_buffer *buffer)
4947{
4948 struct ring_buffer_per_cpu *cpu_buffer;
4949 unsigned long flags;
4950 bool dolock;
4951 int cpu;
4952 int ret;
4953
4954
4955 for_each_buffer_cpu(buffer, cpu) {
4956 cpu_buffer = buffer->buffers[cpu];
4957 local_irq_save(flags);
4958 dolock = rb_reader_lock(cpu_buffer);
4959 ret = rb_per_cpu_empty(cpu_buffer);
4960 rb_reader_unlock(cpu_buffer, dolock);
4961 local_irq_restore(flags);
4962
4963 if (!ret)
4964 return false;
4965 }
4966
4967 return true;
4968}
4969EXPORT_SYMBOL_GPL(ring_buffer_empty);
4970
4971
4972
4973
4974
4975
4976bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
4977{
4978 struct ring_buffer_per_cpu *cpu_buffer;
4979 unsigned long flags;
4980 bool dolock;
4981 int ret;
4982
4983 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4984 return true;
4985
4986 cpu_buffer = buffer->buffers[cpu];
4987 local_irq_save(flags);
4988 dolock = rb_reader_lock(cpu_buffer);
4989 ret = rb_per_cpu_empty(cpu_buffer);
4990 rb_reader_unlock(cpu_buffer, dolock);
4991 local_irq_restore(flags);
4992
4993 return ret;
4994}
4995EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4996
4997#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
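/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * Swaps the per-CPU buffers of two ring buffers that have the same
 * number of pages.  Fails with -EAGAIN if either buffer has recording
 * disabled, or -EBUSY if a commit is in progress on either side.
 * Typically used by tracers that keep a spare buffer around to take a
 * snapshot of the live one.
 */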
5009int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5010 struct trace_buffer *buffer_b, int cpu)
5011{
5012 struct ring_buffer_per_cpu *cpu_buffer_a;
5013 struct ring_buffer_per_cpu *cpu_buffer_b;
5014 int ret = -EINVAL;
5015
5016 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5017 !cpumask_test_cpu(cpu, buffer_b->cpumask))
5018 goto out;
5019
5020 cpu_buffer_a = buffer_a->buffers[cpu];
5021 cpu_buffer_b = buffer_b->buffers[cpu];
5022
5023
5024 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5025 goto out;
5026
5027 ret = -EAGAIN;
5028
5029 if (atomic_read(&buffer_a->record_disabled))
5030 goto out;
5031
5032 if (atomic_read(&buffer_b->record_disabled))
5033 goto out;
5034
5035 if (atomic_read(&cpu_buffer_a->record_disabled))
5036 goto out;
5037
5038 if (atomic_read(&cpu_buffer_b->record_disabled))
5039 goto out;
5040
5041
5042
5043
5044
5045
5046
5047 atomic_inc(&cpu_buffer_a->record_disabled);
5048 atomic_inc(&cpu_buffer_b->record_disabled);
5049
5050 ret = -EBUSY;
5051 if (local_read(&cpu_buffer_a->committing))
5052 goto out_dec;
5053 if (local_read(&cpu_buffer_b->committing))
5054 goto out_dec;
5055
5056 buffer_a->buffers[cpu] = cpu_buffer_b;
5057 buffer_b->buffers[cpu] = cpu_buffer_a;
5058
5059 cpu_buffer_b->buffer = buffer_a;
5060 cpu_buffer_a->buffer = buffer_b;
5061
5062 ret = 0;
5063
5064out_dec:
5065 atomic_dec(&cpu_buffer_a->record_disabled);
5066 atomic_dec(&cpu_buffer_b->record_disabled);
5067out:
5068 return ret;
5069}
5070EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
5071#endif
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
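/**
 * ring_buffer_alloc_read_page - allocate a page to read from the buffer
 * @buffer: the buffer to allocate for
 * @cpu: the cpu buffer to allocate for
 *
 * Used together with ring_buffer_read_page(): the page returned here is
 * handed to ring_buffer_read_page(), which may swap it with a page of
 * the ring buffer instead of copying.  A previously freed page is
 * reused when available.
 *
 * Returns the page address, or an ERR_PTR() on failure.
 */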
5089void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5090{
5091 struct ring_buffer_per_cpu *cpu_buffer;
5092 struct buffer_data_page *bpage = NULL;
5093 unsigned long flags;
5094 struct page *page;
5095
5096 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5097 return ERR_PTR(-ENODEV);
5098
5099 cpu_buffer = buffer->buffers[cpu];
5100 local_irq_save(flags);
5101 arch_spin_lock(&cpu_buffer->lock);
5102
5103 if (cpu_buffer->free_page) {
5104 bpage = cpu_buffer->free_page;
5105 cpu_buffer->free_page = NULL;
5106 }
5107
5108 arch_spin_unlock(&cpu_buffer->lock);
5109 local_irq_restore(flags);
5110
5111 if (bpage)
5112 goto out;
5113
5114 page = alloc_pages_node(cpu_to_node(cpu),
5115 GFP_KERNEL | __GFP_NORETRY, 0);
5116 if (!page)
5117 return ERR_PTR(-ENOMEM);
5118
5119 bpage = page_address(page);
5120
5121 out:
5122 rb_init_page(bpage);
5123
5124 return bpage;
5125}
5126EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5137{
5138 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5139 struct buffer_data_page *bpage = data;
5140 struct page *page = virt_to_page(bpage);
5141 unsigned long flags;
5142
5143
5144 if (page_ref_count(page) > 1)
5145 goto out;
5146
5147 local_irq_save(flags);
5148 arch_spin_lock(&cpu_buffer->lock);
5149
5150 if (!cpu_buffer->free_page) {
5151 cpu_buffer->free_page = bpage;
5152 bpage = NULL;
5153 }
5154
5155 arch_spin_unlock(&cpu_buffer->lock);
5156 local_irq_restore(flags);
5157
5158 out:
5159 free_page((unsigned long)bpage);
5160}
5161EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
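/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: address of the page pointer obtained from
 *	ring_buffer_alloc_read_page()
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: only extract when the reader page can be taken whole
 *
 * Pulls data out of the ring buffer and consumes it.  When the reader
 * page can be taken in full, it is swapped with *@data_page; otherwise
 * the events are copied.  For example (error handling elided,
 * process_page() standing in for whatever the caller does with the data):
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * Returns the offset of the first consumed event in the page (>= 0),
 * or a negative value if no data was transferred.
 */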
5196int ring_buffer_read_page(struct trace_buffer *buffer,
5197 void **data_page, size_t len, int cpu, int full)
5198{
5199 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5200 struct ring_buffer_event *event;
5201 struct buffer_data_page *bpage;
5202 struct buffer_page *reader;
5203 unsigned long missed_events;
5204 unsigned long flags;
5205 unsigned int commit;
5206 unsigned int read;
5207 u64 save_timestamp;
5208 int ret = -1;
5209
5210 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5211 goto out;
5212
5213
5214
5215
5216
5217 if (len <= BUF_PAGE_HDR_SIZE)
5218 goto out;
5219
5220 len -= BUF_PAGE_HDR_SIZE;
5221
5222 if (!data_page)
5223 goto out;
5224
5225 bpage = *data_page;
5226 if (!bpage)
5227 goto out;
5228
5229 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5230
5231 reader = rb_get_reader_page(cpu_buffer);
5232 if (!reader)
5233 goto out_unlock;
5234
5235 event = rb_reader_event(cpu_buffer);
5236
5237 read = reader->read;
5238 commit = rb_page_commit(reader);
5239
5240
5241 missed_events = cpu_buffer->lost_events;
5242
5243
5244
5245
5246
5247
5248
5249
5250 if (read || (len < (commit - read)) ||
5251 cpu_buffer->reader_page == cpu_buffer->commit_page) {
5252 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5253 unsigned int rpos = read;
5254 unsigned int pos = 0;
5255 unsigned int size;
5256
5257 if (full)
5258 goto out_unlock;
5259
5260 if (len > (commit - read))
5261 len = (commit - read);
5262
5263
5264 size = rb_event_ts_length(event);
5265
5266 if (len < size)
5267 goto out_unlock;
5268
5269
5270 save_timestamp = cpu_buffer->read_stamp;
5271
5272
5273 do {
5274
5275
5276
5277
5278
5279
5280 size = rb_event_length(event);
5281 memcpy(bpage->data + pos, rpage->data + rpos, size);
5282
5283 len -= size;
5284
5285 rb_advance_reader(cpu_buffer);
5286 rpos = reader->read;
5287 pos += size;
5288
5289 if (rpos >= commit)
5290 break;
5291
5292 event = rb_reader_event(cpu_buffer);
5293
5294 size = rb_event_ts_length(event);
5295 } while (len >= size);
5296
5297
5298 local_set(&bpage->commit, pos);
5299 bpage->time_stamp = save_timestamp;
5300
5301
5302 read = 0;
5303 } else {
5304
5305 cpu_buffer->read += rb_page_entries(reader);
5306 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5307
5308
5309 rb_init_page(bpage);
5310 bpage = reader->page;
5311 reader->page = *data_page;
5312 local_set(&reader->write, 0);
5313 local_set(&reader->entries, 0);
5314 reader->read = 0;
5315 *data_page = bpage;
5316
5317
5318
5319
5320
5321
5322 if (reader->real_end)
5323 local_set(&bpage->commit, reader->real_end);
5324 }
5325 ret = read;
5326
5327 cpu_buffer->lost_events = 0;
5328
5329 commit = local_read(&bpage->commit);
5330
5331
5332
5333 if (missed_events) {
5334
5335
5336
5337 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5338 memcpy(&bpage->data[commit], &missed_events,
5339 sizeof(missed_events));
5340 local_add(RB_MISSED_STORED, &bpage->commit);
5341 commit += sizeof(missed_events);
5342 }
5343 local_add(RB_MISSED_EVENTS, &bpage->commit);
5344 }
5345
5346
5347
5348
5349 if (commit < BUF_PAGE_SIZE)
5350 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5351
5352 out_unlock:
5353 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5354
5355 out:
5356 return ret;
5357}
5358EXPORT_SYMBOL_GPL(ring_buffer_read_page);
5359
5360
5361
5362
5363
5364
5365int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5366{
5367 struct trace_buffer *buffer;
5368 long nr_pages_same;
5369 int cpu_i;
5370 unsigned long nr_pages;
5371
5372 buffer = container_of(node, struct trace_buffer, node);
5373 if (cpumask_test_cpu(cpu, buffer->cpumask))
5374 return 0;
5375
5376 nr_pages = 0;
5377 nr_pages_same = 1;
5378
5379 for_each_buffer_cpu(buffer, cpu_i) {
5380
5381 if (nr_pages == 0)
5382 nr_pages = buffer->buffers[cpu_i]->nr_pages;
5383 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5384 nr_pages_same = 0;
5385 break;
5386 }
5387 }
5388
5389 if (!nr_pages_same)
5390 nr_pages = 2;
5391 buffer->buffers[cpu] =
5392 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5393 if (!buffer->buffers[cpu]) {
5394 WARN(1, "failed to allocate ring buffer on CPU %u\n",
5395 cpu);
5396 return -ENOMEM;
5397 }
5398 smp_wmb();
5399 cpumask_set_cpu(cpu, buffer->cpumask);
5400 return 0;
5401}
5402
5403#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419static struct task_struct *rb_threads[NR_CPUS] __initdata;
5420
5421struct rb_test_data {
5422 struct trace_buffer *buffer;
5423 unsigned long events;
5424 unsigned long bytes_written;
5425 unsigned long bytes_alloc;
5426 unsigned long bytes_dropped;
5427 unsigned long events_nested;
5428 unsigned long bytes_written_nested;
5429 unsigned long bytes_alloc_nested;
5430 unsigned long bytes_dropped_nested;
5431 int min_size_nested;
5432 int max_size_nested;
5433 int max_size;
5434 int min_size;
5435 int cpu;
5436 int cnt;
5437};
5438
5439static struct rb_test_data rb_data[NR_CPUS] __initdata;
5440
5441
5442#define RB_TEST_BUFFER_SIZE 1048576
5443
5444static char rb_string[] __initdata =
5445 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5446 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5447 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5448
5449static bool rb_test_started __initdata;
5450
5451struct rb_item {
5452 int size;
5453 char str[];
5454};
5455
5456static __init int rb_write_something(struct rb_test_data *data, bool nested)
5457{
5458 struct ring_buffer_event *event;
5459 struct rb_item *item;
5460 bool started;
5461 int event_len;
5462 int size;
5463 int len;
5464 int cnt;
5465
5466
5467 cnt = data->cnt + (nested ? 27 : 0);
5468
5469
5470 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
5471
5472 len = size + sizeof(struct rb_item);
5473
5474 started = rb_test_started;
5475
5476 smp_rmb();
5477
5478 event = ring_buffer_lock_reserve(data->buffer, len);
5479 if (!event) {
5480
5481 if (started) {
5482 if (nested)
5483 data->bytes_dropped_nested += len;
5484 else
5485 data->bytes_dropped += len;
5486 }
5487 return len;
5488 }
5489
5490 event_len = ring_buffer_event_length(event);
5491
5492 if (RB_WARN_ON(data->buffer, event_len < len))
5493 goto out;
5494
5495 item = ring_buffer_event_data(event);
5496 item->size = size;
5497 memcpy(item->str, rb_string, size);
5498
5499 if (nested) {
5500 data->bytes_alloc_nested += event_len;
5501 data->bytes_written_nested += len;
5502 data->events_nested++;
5503 if (!data->min_size_nested || len < data->min_size_nested)
5504 data->min_size_nested = len;
5505 if (len > data->max_size_nested)
5506 data->max_size_nested = len;
5507 } else {
5508 data->bytes_alloc += event_len;
5509 data->bytes_written += len;
5510 data->events++;
5511 if (!data->min_size || len < data->min_size)
5512 data->min_size = len;
5513 if (len > data->max_size)
5514 data->max_size = len;
5515 }
5516
5517 out:
5518 ring_buffer_unlock_commit(data->buffer, event);
5519
5520 return 0;
5521}
5522
5523static __init int rb_test(void *arg)
5524{
5525 struct rb_test_data *data = arg;
5526
5527 while (!kthread_should_stop()) {
5528 rb_write_something(data, false);
5529 data->cnt++;
5530
5531 set_current_state(TASK_INTERRUPTIBLE);
5532
5533 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5534 }
5535
5536 return 0;
5537}
5538
5539static __init void rb_ipi(void *ignore)
5540{
5541 struct rb_test_data *data;
5542 int cpu = smp_processor_id();
5543
5544 data = &rb_data[cpu];
5545 rb_write_something(data, true);
5546}
5547
5548static __init int rb_hammer_test(void *arg)
5549{
5550 while (!kthread_should_stop()) {
5551
5552
5553 smp_call_function(rb_ipi, NULL, 1);
5554
5555 schedule();
5556 }
5557
5558 return 0;
5559}
5560
5561static __init int test_ringbuffer(void)
5562{
5563 struct task_struct *rb_hammer;
5564 struct trace_buffer *buffer;
5565 int cpu;
5566 int ret = 0;
5567
5568 if (security_locked_down(LOCKDOWN_TRACEFS)) {
5569 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
5570 return 0;
5571 }
5572
5573 pr_info("Running ring buffer tests...\n");
5574
5575 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5576 if (WARN_ON(!buffer))
5577 return 0;
5578
5579
5580 ring_buffer_record_off(buffer);
5581
5582 for_each_online_cpu(cpu) {
5583 rb_data[cpu].buffer = buffer;
5584 rb_data[cpu].cpu = cpu;
5585 rb_data[cpu].cnt = cpu;
5586 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5587 "rbtester/%d", cpu);
5588 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5589 pr_cont("FAILED\n");
5590 ret = PTR_ERR(rb_threads[cpu]);
5591 goto out_free;
5592 }
5593
5594 kthread_bind(rb_threads[cpu], cpu);
5595 wake_up_process(rb_threads[cpu]);
5596 }
5597
5598
5599 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5600 if (WARN_ON(IS_ERR(rb_hammer))) {
5601 pr_cont("FAILED\n");
5602 ret = PTR_ERR(rb_hammer);
5603 goto out_free;
5604 }
5605
5606 ring_buffer_record_on(buffer);
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616 smp_wmb();
5617 rb_test_started = true;
5618
5619 set_current_state(TASK_INTERRUPTIBLE);
5620 /* let the writer threads run for roughly 10 seconds */
5621 schedule_timeout(10 * HZ);
5622
5623 kthread_stop(rb_hammer);
5624
5625 out_free:
5626 for_each_online_cpu(cpu) {
5627 if (!rb_threads[cpu])
5628 break;
5629 kthread_stop(rb_threads[cpu]);
5630 }
5631 if (ret) {
5632 ring_buffer_free(buffer);
5633 return ret;
5634 }
5635
5636
5637 pr_info("finished\n");
5638 for_each_online_cpu(cpu) {
5639 struct ring_buffer_event *event;
5640 struct rb_test_data *data = &rb_data[cpu];
5641 struct rb_item *item;
5642 unsigned long total_events;
5643 unsigned long total_dropped;
5644 unsigned long total_written;
5645 unsigned long total_alloc;
5646 unsigned long total_read = 0;
5647 unsigned long total_size = 0;
5648 unsigned long total_len = 0;
5649 unsigned long total_lost = 0;
5650 unsigned long lost;
5651 int big_event_size;
5652 int small_event_size;
5653
5654 ret = -1;
5655
5656 total_events = data->events + data->events_nested;
5657 total_written = data->bytes_written + data->bytes_written_nested;
5658 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5659 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5660
5661 big_event_size = data->max_size + data->max_size_nested;
5662 small_event_size = data->min_size + data->min_size_nested;
5663
5664 pr_info("CPU %d:\n", cpu);
5665 pr_info(" events: %ld\n", total_events);
5666 pr_info(" dropped bytes: %ld\n", total_dropped);
5667 pr_info(" alloced bytes: %ld\n", total_alloc);
5668 pr_info(" written bytes: %ld\n", total_written);
5669 pr_info(" biggest event: %d\n", big_event_size);
5670 pr_info(" smallest event: %d\n", small_event_size);
5671
5672 if (RB_WARN_ON(buffer, total_dropped))
5673 break;
5674
5675 ret = 0;
5676
5677 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5678 total_lost += lost;
5679 item = ring_buffer_event_data(event);
5680 total_len += ring_buffer_event_length(event);
5681 total_size += item->size + sizeof(struct rb_item);
5682 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5683 pr_info("FAILED!\n");
5684 pr_info("buffer had: %.*s\n", item->size, item->str);
5685 pr_info("expected: %.*s\n", item->size, rb_string);
5686 RB_WARN_ON(buffer, 1);
5687 ret = -1;
5688 break;
5689 }
5690 total_read++;
5691 }
5692 if (ret)
5693 break;
5694
5695 ret = -1;
5696
5697 pr_info(" read events: %ld\n", total_read);
5698 pr_info(" lost events: %ld\n", total_lost);
5699 pr_info(" total events: %ld\n", total_lost + total_read);
5700 pr_info(" recorded len bytes: %ld\n", total_len);
5701 pr_info(" recorded size bytes: %ld\n", total_size);
5702 if (total_lost)
5703 pr_info(" With dropped events, record len and size may not match\n"
5704 " alloced and written from above\n");
5705 if (!total_lost) {
5706 if (RB_WARN_ON(buffer, total_len != total_alloc ||
5707 total_size != total_written))
5708 break;
5709 }
5710 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5711 break;
5712
5713 ret = 0;
5714 }
5715 if (!ret)
5716 pr_info("Ring buffer PASSED!\n");
5717
5718 ring_buffer_free(buffer);
5719 return 0;
5720}
5721
5722late_initcall(test_ringbuffer);
5723#endif
5724