// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * Describe the "compressed" event header layout to user space.
 * What is printed here must match struct ring_buffer_event.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * An event on the buffer is a compressed record:
 *
 *	type_len	:  5 bits
 *	time_delta	: 27 bits
 *	array		: 32 bits
 *
 * type_len values 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encode the data
 * length directly (length = type_len * RB_ALIGNMENT) and the payload
 * starts at array[0]. A type_len of zero means the payload is too big
 * to compress: its length is stored in array[0] and the data starts
 * at array[1]. The remaining values mark special events:
 *
 *	RINGBUF_TYPE_PADDING	 - unused space at the end of a page,
 *				   or a discarded event
 *	RINGBUF_TYPE_TIME_EXTEND - a 59 bit delta that did not fit in
 *				   the 27 bit time_delta field
 *	RINGBUF_TYPE_TIME_STAMP	 - an absolute time stamp
 *
 * time_delta is the nanosecond delta against the previous event on
 * the same page (the first event on a page uses the page time stamp).
 */
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
#define RB_ALIGN_DATA		__aligned(RB_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA	0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
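
/*
 * Illustrative sketch (not part of this file): for a data event,
 * ring_buffer_event_length() reports the payload size requested at
 * reserve time, while rb_event_length() above reports the on-buffer
 * footprint including the header:
 *
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, 24);
 *	if (event)
 *		WARN_ON(ring_buffer_event_length(event) < 24);
 */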

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in type_len, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/**
 * ring_buffer_event_time_stamp - return the event's extended timestamp
 * @event: the event to get the timestamp of
 *
 * Returns the extended timestamp associated with a data event.
 * An extended time_stamp is a 64 bit timestamp represented
 * internally in a special way that makes the best use of space
 * contained within a ring buffer event.  This function decodes
 * it and maps it to a straight u64 value.
 */
u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
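
/*
 * Worked example: a delta too large for the 27 bit time_delta field is
 * stored split across the event, with the low 27 bits in time_delta
 * and the high bits in array[0]. The decode above computes
 *
 *	ts = (array[0] << 27) + time_delta
 *
 * recovering the original value, up to 32 + 27 = 59 bits.
 */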

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * figure out what the buffer page is attached to.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/*
 * Free both the buffer_page descriptor and the data page it
 * points to.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
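
/*
 * Worked example: with TS_SHIFT = 27, a delta below 2^27 ns (~134 ms)
 * fits in the event header and test_time_stamp() returns 0. A delta
 * of, say, 200 ms = 200000000 ns has bits set above bit 26, so
 * test_time_stamp() returns 1 and a TIME_EXTEND event must be added.
 */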

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	/*
	 * The overwrite flag shares storage with the commit field
	 * (see the RB_MISSED_* flags above), hence the same offset.
	 */
	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for which event context the event is in.
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
{
	size_t read;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}
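
/*
 * Illustrative sketch (hypothetical values): with 100 pages touched,
 * 30 pages read and nr_pages = 100, this reports 70 dirty pages, i.e.
 * the buffer is 70% full. The watermark checks elsewhere in this file
 * compare (dirty * 100) against (full * nr_pages) to avoid a division.
 */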

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the wake up. The worst that can
		 * happen if we lose the flag is a spurious wake up,
		 * and the condition is re-checked below anyway.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			size_t nr_pages;
			size_t dirty;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			nr_pages = cpu_buffer->nr_pages;
			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
			/*
			 * shortest_full must track the smallest percentage
			 * any waiter is waiting for, so that writers know
			 * the earliest point a wake up is needed.
			 */
			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (!pagebusy &&
			    (!nr_pages || (dirty * 100) > full * nr_pages))
				break;
		}

		schedule();
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
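
/*
 * Usage sketch (illustrative; consume_events() is a hypothetical
 * consumer): block until cpu 0 of the buffer is at least half full,
 * then read:
 *
 *	int ret = ring_buffer_wait(buffer, 0, 50);
 *
 *	if (!ret && !ring_buffer_empty_cpu(buffer, 0))
 *		consume_events(buffer, 0);
 */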

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty.  Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path.  We only need a memory barrier when
	 * the buffer goes from empty to having content.  But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless: the writer and the per-cpu reader
 * never block each other. The reader owns a spare "reader page"
 * outside the ring; to read, it swaps that spare page with the
 * current head page using cmpxchg, taking the head page (and the
 * events on it) out of the ring while the writer keeps going.
 *
 * To make the reader's swap and the writer's head-page advance safe
 * against each other (and against interrupts on the writer's CPU),
 * state is kept in the two least significant bits of the list->next
 * pointer of the page *before* the head page. Pages are cache line
 * aligned, so those bits are otherwise always zero:
 *
 *   HEAD   - the next page is the head page
 *   UPDATE - the writer is currently moving the head page
 *
 * A reader may only swap out a page whose pointer is flagged HEAD,
 * and its cmpxchg fails if the writer changed the flag to UPDATE in
 * the meantime. Conversely, the writer detects a reader's swap when
 * its own cmpxchg on the flagged pointer fails (RB_PAGE_MOVED).
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
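
/*
 * Example of the pointer-flag encoding handled by rb_list_head()
 * (illustrative only): buffer pages are cache line aligned, so the
 * low two bits of a list_head pointer can carry state:
 *
 *	struct list_head *next = (struct list_head *)
 *		((unsigned long)&page->list | RB_PAGE_HEAD);
 *
 *	rb_list_head(next) == &page->list;		// flag stripped
 *	((unsigned long)next & RB_FLAG_MASK);		// == RB_PAGE_HEAD
 */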

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next pointer of the previous
 * page still points to this page, the flag bits tell the truth.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for reset)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/*
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * If a user thread allocates too much, and si_mem_available()
	 * reports there's enough memory, even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     mflags, cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: the lock class key used by lockdep for the reader lock.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	/* The remaining cpu buffers are set up by the hotplug callbacks. */
	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
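
/*
 * Usage sketch (illustrative): allocate a 1MB-per-cpu overwriting
 * buffer through the ring_buffer_alloc() wrapper (which supplies the
 * lock class key for this call), and free it again:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */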

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * head page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		cond_resched();

		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    first page of the list.
	 * 3. Finally, we update the head->prev to the end of list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * failing.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;
		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);

		/* check for NULL before taking the address of ->list */
		if (!hpage)
			break;
		head_page = &hpage->list;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					  -cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 * @cpu_id: the cpu buffer to resize
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns size on success and < 0 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
		       int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long nr_pages;
	int cpu, err = 0;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return size;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/* we need a minimum of two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	size = nr_pages * BUF_PAGE_SIZE;

	/*
	 * Don't succeed if resizing is disabled, as a reader might be
	 * manipulating the ring buffer and is expecting a sane state while
	 * this is true.
	 */
	if (atomic_read(&buffer->resize_disabled))
		return -EBUSY;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages, cpu)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}
		}

		get_online_cpus();
		/*
		 * Fire off all the required work handlers
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			/* Can't run something on an offline CPU. */
			if (!cpu_online(cpu)) {
				rb_update_pages(cpu_buffer);
				cpu_buffer->nr_pages_to_update = 0;
			} else {
				schedule_work_on(cpu,
						&cpu_buffer->update_pages_work);
			}
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}

		put_online_cpus();
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					    &cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		get_online_cpus();

		/* Can't run something on an offline CPU. */
		if (!cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		}

		cpu_buffer->nr_pages_to_update = 0;
		put_online_cpus();
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_rcu();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
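
/*
 * Usage sketch (illustrative): grow every per-cpu buffer to 2MB, or
 * only the buffer of cpu 1. The return value is the page-rounded size
 * on success:
 *
 *	ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
 *	ring_buffer_resize(buffer, 2 << 20, 1);
 */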

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static __always_inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static __always_inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static __always_inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static __always_inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another cpu moved the next
	 *           pointer to its reader page. Don't worry about it.
	 */
	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted a previous
		 * update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * otherwise we are an interrupt, and only
	 * want the outer most commit to finish.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		struct buffer_page *buffer_tail_page;

		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
		/*
		 * If the tail had moved passed next, then we need
		 * to reset the pointer.
		 */
		if (buffer_tail_page != tail_page &&
		    buffer_tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}

static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      unsigned long tail, struct rb_event_info *info)
{
	struct buffer_page *tail_page = info->tail_page;
	struct ring_buffer_event *event;
	unsigned long length = info->length;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 * If we are skipping this event, it still must
	 * be counted upon.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}

static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);

/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long tail, struct rb_event_info *info)
{
	struct buffer_page *tail_page = info->tail_page;
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				local_inc(&cpu_buffer->dropped_events);
				goto out_reset;
			}

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	rb_tail_page_update(cpu_buffer, tail_page, next_page);

 out_again:

	rb_reset_tail(cpu_buffer, tail, info);

	/* Commit what we have for now. */
	rb_end_commit(cpu_buffer);
	/* rb_end_commit() decs committing */
	local_inc(&cpu_buffer->committing);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail, info);

	return NULL;
}

/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
{
	if (abs)
		event->type_len = RINGBUF_TYPE_TIME_STAMP;
	else
		event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page, or not delta? */
	if (abs || rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}

static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
				      struct ring_buffer_event *event);

/**
 * rb_update_event - update event type and data
 * @cpu_buffer: The per cpu buffer of the @event
 * @event: the event to update
 * @info: The info to update the @event with (contains length and delta)
 *
 * Update the type and data fields of the @event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event,
		struct rb_event_info *info)
{
	unsigned length = info->length;
	u64 delta = info->delta;

	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(info->add_timestamp)) {
		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);

		event = rb_add_time_stamp(event, info->delta, abs);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length++;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	/*
	 * In case the time delta is larger than the 27 bits for it
	 * in the header, we need to add a timestamp. If another
	 * event comes in when trying to discard this one to increase
	 * the length, then the timestamp will be added in the allocated
	 * space of this event. If length is bigger than the size needed
	 * for the TIME_EXTEND, then padding has to be used. The events
	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
	 * As length is a multiple of 4, we only need to worry if it
	 * is 12 (RB_LEN_TIME_EXTEND + 4).
	 */
	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
		length += RB_ALIGNMENT;

	return length;
}
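
/*
 * Worked example: a request for 3 bytes of data becomes
 * 3 + RB_EVNT_HDR_SIZE (4) = 7, aligned up to 8 bytes. A request for
 * 200 bytes exceeds RB_MAX_SMALL_DATA (112), so 4 more bytes are added
 * for the length word in array[0]: 200 + 4 + 4 = 208, already aligned.
 * A result of exactly 12 (RB_LEN_TIME_EXTEND + 4) is bumped to 16, as
 * the comment above explains.
 */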

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline bool sched_clock_stable(void)
{
	return true;
}
#endif

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = READ_ONCE(cpu_buffer->tail_page);

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static __always_inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		/* Only update the write stamp if the page has an event */
		if (rb_page_write(cpu_buffer->commit_page))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
		goto again;
}

static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

static __always_inline bool
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static __always_inline void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			delta = ring_buffer_event_time_stamp(event);
			cpu_buffer->write_stamp += delta;
		} else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
			delta = ring_buffer_event_time_stamp(event);
			cpu_buffer->write_stamp = delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
	size_t nr_pages;
	size_t dirty;
	size_t full;

	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&buffer->irq_work.work);
	}

	if (cpu_buffer->irq_work.waiters_pending) {
		cpu_buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&cpu_buffer->irq_work.work);
	}

	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
		return;

	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
		return;

	if (!cpu_buffer->irq_work.full_waiters_pending)
		return;

	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);

	full = cpu_buffer->shortest_full;
	nr_pages = cpu_buffer->nr_pages;
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
		return;

	cpu_buffer->irq_work.wakeup_full = true;
	cpu_buffer->irq_work.full_waiters_pending = false;
	/* irq_work_queue() supplies its own memory barriers */
	irq_work_queue(&cpu_buffer->irq_work.work);
}

/*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per cpu buffer field can only be modified
 * by the current task between lock and unlock. But it can
 * be modified more than once via an interrupt. To pass this
 * information from the lock to the unlock without having to
 * access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing),
 * we use a bitmask trick.
 *
 *  bit 0 =  NMI context
 *  bit 1 =  IRQ context
 *  bit 2 =  SOFTIRQ context
 *  bit 3 =  normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SOFTIRQ never preempts an IRQ
 * context.
 *
 * When the context is determined, the corresponding bit is
 * checked and set (if it was already set, a recursion in that
 * context happened and the write is rejected).
 *
 * On unlock, the bit to clear is the one that was most recently
 * set. Because deeper contexts use lower bit numbers, that is the
 * lowest set bit at or above 'nest', which is exactly what the
 * subtraction in trace_recursive_unlock() clears.
 */
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned int val = cpu_buffer->current_context;
	unsigned long pc = preempt_count();
	int bit;

	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
		bit = RB_CTX_NORMAL;
	else
		bit = pc & NMI_MASK ? RB_CTX_NMI :
			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;

	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
		return 1;

	val |= (1 << (bit + cpu_buffer->nest));
	cpu_buffer->current_context = val;

	return 0;
}

static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->current_context &=
		cpu_buffer->current_context - (1 << cpu_buffer->nest);
}
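
/*
 * Example of the recursion bits (illustrative): a writer in normal
 * context sets the RB_CTX_NORMAL bit in current_context. If an IRQ
 * then traces, it sets the distinct RB_CTX_IRQ bit and is allowed in;
 * but if the IRQ handler recurses and attempts a second reserve,
 * RB_CTX_IRQ is already set and trace_recursive_lock() rejects it.
 */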

/* The recursive locking above uses 4 bits */
#define NESTED_BITS 4

/**
 * ring_buffer_nest_start - Allow to trace while nested
 * @buffer: The ring buffer to modify
 *
 * The ring buffer has a safety mechanism to prevent recursion.
 * But there may be a case where a trace needs to be done while
 * tracing something else. In this case, calling this function
 * will allow this function to nest within a currently active
 * ring_buffer_lock_reserve().
 *
 * Call this function before calling another ring_buffer_lock_reserve() and
 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
 */
void ring_buffer_nest_start(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* Enabled by ring_buffer_nest_end() */
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest += NESTED_BITS;
}

/**
 * ring_buffer_nest_end - Allow to trace while nested
 * @buffer: The ring buffer to modify
 *
 * Must be called after ring_buffer_nest_start() and after the
 * ring_buffer_unlock_commit().
 */
void ring_buffer_nest_end(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* disabled by ring_buffer_nest_start() */
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest -= NESTED_BITS;
	preempt_enable_notrace();
}

/**
 * ring_buffer_unlock_commit - commit a reserved
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	rb_wakeups(buffer, cpu_buffer);

	trace_recursive_unlock(cpu_buffer);

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static noinline void
rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
		    struct rb_event_info *info)
{
	WARN_ONCE(info->delta > (1ULL << 59),
		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
		  (unsigned long long)info->delta,
		  (unsigned long long)info->ts,
		  (unsigned long long)cpu_buffer->write_stamp,
		  sched_clock_stable() ? "" :
		  "If you just came from a suspend/resume,\n"
		  "please switch to the trace global clock:\n"
		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
		  "or add trace_clock=global to the kernel command line\n");
	info->add_timestamp = 1;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  struct rb_event_info *info)
{
	struct ring_buffer_event *event;
	struct buffer_page *tail_page;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(info->add_timestamp))
		info->length += RB_LEN_TIME_EXTEND;

	/* Don't let the compiler play games with cpu_buffer->tail_page */
	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
	write = local_add_return(info->length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - info->length;

	/*
	 * If this is the first commit on the page, then it has the same
	 * timestamp as the page itself.
	 */
	if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
		info->delta = 0;

	/* See if we shot pass the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, tail, info);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	rb_update_event(cpu_buffer, event, info);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = info->ts;

	/* account for these added bytes */
	local_add(info->length, &cpu_buffer->entries_bytes);

	return event;
}
2868
2869static __always_inline struct ring_buffer_event *
2870rb_reserve_next_event(struct ring_buffer *buffer,
2871 struct ring_buffer_per_cpu *cpu_buffer,
2872 unsigned long length)
2873{
2874 struct ring_buffer_event *event;
2875 struct rb_event_info info;
2876 int nr_loops = 0;
2877 u64 diff;
2878
2879 rb_start_commit(cpu_buffer);
2880
2881#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 /*
  * Due to the ability to swap a cpu buffer from a buffer
  * it is possible it was swapped before we committed.
  * (committing stops a swap). We check for it here and
  * if it happened, we have to fail the write.
  */
2888 barrier();
2889 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
2890 local_dec(&cpu_buffer->committing);
2891 local_dec(&cpu_buffer->commits);
2892 return NULL;
2893 }
2894#endif
2895
2896 info.length = rb_calculate_event_length(length);
2897 again:
2898 info.add_timestamp = 0;
2899 info.delta = 0;
2900
 /*
  * We allow interrupts to reenter here and do a trace.
  * If one does, it will cause this original code to loop
  * again. Also NMIs can happen. To put a limit on that
  * craziness, bail out (and warn) after 1000 loops.
  */
2910 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2911 goto out_fail;
2912
2913 info.ts = rb_time_stamp(cpu_buffer->buffer);
2914 diff = info.ts - cpu_buffer->write_stamp;
2915
 /* make sure this diff is calculated here */
2917 barrier();
2918
2919 if (ring_buffer_time_stamp_abs(buffer)) {
2920 info.delta = info.ts;
2921 rb_handle_timestamp(cpu_buffer, &info);
 } else /* Did the write stamp get updated already? */
2923 if (likely(info.ts >= cpu_buffer->write_stamp)) {
2924 info.delta = diff;
2925 if (unlikely(test_time_stamp(info.delta)))
2926 rb_handle_timestamp(cpu_buffer, &info);
2927 }
2928
2929 event = __rb_reserve_next(cpu_buffer, &info);
2930
2931 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2932 if (info.add_timestamp)
2933 info.length -= RB_LEN_TIME_EXTEND;
2934 goto again;
2935 }
2936
2937 if (!event)
2938 goto out_fail;
2939
2940 return event;
2941
2942 out_fail:
2943 rb_end_commit(cpu_buffer);
2944 return NULL;
2945}
2946
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
2962struct ring_buffer_event *
2963ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2964{
2965 struct ring_buffer_per_cpu *cpu_buffer;
2966 struct ring_buffer_event *event;
2967 int cpu;
2968
2969
2970 preempt_disable_notrace();
2971
2972 if (unlikely(atomic_read(&buffer->record_disabled)))
2973 goto out;
2974
2975 cpu = raw_smp_processor_id();
2976
2977 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2978 goto out;
2979
2980 cpu_buffer = buffer->buffers[cpu];
2981
2982 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2983 goto out;
2984
2985 if (unlikely(length > BUF_MAX_DATA_SIZE))
2986 goto out;
2987
2988 if (unlikely(trace_recursive_lock(cpu_buffer)))
2989 goto out;
2990
2991 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2992 if (!event)
2993 goto out_unlock;
2994
2995 return event;
2996
2997 out_unlock:
2998 trace_recursive_unlock(cpu_buffer);
2999 out:
3000 preempt_enable_notrace();
3001 return NULL;
3002}
3003EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
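
/*
 * Illustrative sketch (not part of this file): the usual write path is
 * a reserve/commit pair. struct my_entry is hypothetical.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	ring_buffer_unlock_commit(buffer, event);
 */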
3004
/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
3011static inline void
3012rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3013 struct ring_buffer_event *event)
3014{
3015 unsigned long addr = (unsigned long)event;
3016 struct buffer_page *bpage = cpu_buffer->commit_page;
3017 struct buffer_page *start;
3018
3019 addr &= PAGE_MASK;
3020
 /* Do the likely case first */
3022 if (likely(bpage->page == (void *)addr)) {
3023 local_dec(&bpage->entries);
3024 return;
3025 }
3026
 /*
  * Because the commit page may be on the reader page we
  * start with the next page and check the end loop there.
  */
3031 rb_inc_page(cpu_buffer, &bpage);
3032 start = bpage;
3033 do {
3034 if (bpage->page == (void *)addr) {
3035 local_dec(&bpage->entries);
3036 return;
3037 }
3038 rb_inc_page(cpu_buffer, &bpage);
3039 } while (bpage != start);
3040
 /* commit not part of this buffer?? */
3042 RB_WARN_ON(cpu_buffer, 1);
3043}
3044

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
3064void ring_buffer_discard_commit(struct ring_buffer *buffer,
3065 struct ring_buffer_event *event)
3066{
3067 struct ring_buffer_per_cpu *cpu_buffer;
3068 int cpu;
3069
 /* The event is discarded regardless */
3071 rb_event_discard(event);
3072
3073 cpu = smp_processor_id();
3074 cpu_buffer = buffer->buffers[cpu];
3075
 /*
  * This must only be called if the event has not been
  * committed yet. Thus we can assume that preemption
  * is still disabled.
  */
3081 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3082
3083 rb_decrement_entry(cpu_buffer, event);
3084 if (rb_try_to_discard(cpu_buffer, event))
3085 goto out;
3086
 /*
  * The commit is still visible by the reader, so we
  * must still update the timestamp.
  */
3091 rb_update_write_stamp(cpu_buffer, event);
3092 out:
3093 rb_end_commit(cpu_buffer);
3094
3095 trace_recursive_unlock(cpu_buffer);
3096
3097 preempt_enable_notrace();
3098
3099}
3100EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
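
/*
 * Illustrative sketch (not part of this file): discarding a reserved
 * event that fails a check made after the data was written. The
 * fill_entry() and my_filter_match() helpers are hypothetical.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		fill_entry(entry);
 *		if (my_filter_match(entry))
 *			ring_buffer_unlock_commit(buffer, event);
 *		else
 *			ring_buffer_discard_commit(buffer, event);
 *	}
 */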
3101
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the
 * data and not the actual event length.
 */
3115int ring_buffer_write(struct ring_buffer *buffer,
3116 unsigned long length,
3117 void *data)
3118{
3119 struct ring_buffer_per_cpu *cpu_buffer;
3120 struct ring_buffer_event *event;
3121 void *body;
3122 int ret = -EBUSY;
3123 int cpu;
3124
3125 preempt_disable_notrace();
3126
3127 if (atomic_read(&buffer->record_disabled))
3128 goto out;
3129
3130 cpu = raw_smp_processor_id();
3131
3132 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3133 goto out;
3134
3135 cpu_buffer = buffer->buffers[cpu];
3136
3137 if (atomic_read(&cpu_buffer->record_disabled))
3138 goto out;
3139
3140 if (length > BUF_MAX_DATA_SIZE)
3141 goto out;
3142
3143 if (unlikely(trace_recursive_lock(cpu_buffer)))
3144 goto out;
3145
3146 event = rb_reserve_next_event(buffer, cpu_buffer, length);
3147 if (!event)
3148 goto out_unlock;
3149
3150 body = rb_event_data(event);
3151
3152 memcpy(body, data, length);
3153
3154 rb_commit(cpu_buffer, event);
3155
3156 rb_wakeups(buffer, cpu_buffer);
3157
3158 ret = 0;
3159
3160 out_unlock:
3161 trace_recursive_unlock(cpu_buffer);
3162
3163 out:
3164 preempt_enable_notrace();
3165
3166 return ret;
3167}
3168EXPORT_SYMBOL_GPL(ring_buffer_write);
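
/*
 * Illustrative sketch (not part of this file): when the payload already
 * exists in a local structure, ring_buffer_write() replaces the
 * reserve/commit pair. struct my_entry and setup_entry() are
 * hypothetical.
 *
 *	struct my_entry entry;
 *
 *	setup_entry(&entry);
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer full or disabled\n");
 */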
3169
3170static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3171{
3172 struct buffer_page *reader = cpu_buffer->reader_page;
3173 struct buffer_page *head = rb_set_head_page(cpu_buffer);
3174 struct buffer_page *commit = cpu_buffer->commit_page;
3175
 /* In case of error, head will be NULL */
3177 if (unlikely(!head))
3178 return true;
3179
3180 return reader->read == rb_page_commit(reader) &&
3181 (commit == reader ||
3182 (commit == head &&
3183 head->read == rb_page_commit(commit)));
3184}
3185
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_rcu() after this.
 */
3195void ring_buffer_record_disable(struct ring_buffer *buffer)
3196{
3197 atomic_inc(&buffer->record_disabled);
3198}
3199EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3200
/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
3208void ring_buffer_record_enable(struct ring_buffer *buffer)
3209{
3210 atomic_dec(&buffer->record_disabled);
3211}
3212EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3213
/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
3225void ring_buffer_record_off(struct ring_buffer *buffer)
3226{
3227 unsigned int rd;
3228 unsigned int new_rd;
3229
3230 do {
3231 rd = atomic_read(&buffer->record_disabled);
3232 new_rd = rd | RB_BUFFER_OFF;
3233 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3234}
3235EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3236
/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that were disabled by
 * ring_buffer_record_off().
 *
 * This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
3248void ring_buffer_record_on(struct ring_buffer *buffer)
3249{
3250 unsigned int rd;
3251 unsigned int new_rd;
3252
3253 do {
3254 rd = atomic_read(&buffer->record_disabled);
3255 new_rd = rd & ~RB_BUFFER_OFF;
3256 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3257}
3258EXPORT_SYMBOL_GPL(ring_buffer_record_on);
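
/*
 * Illustrative sketch (not part of this file): the on/off switch is a
 * separate bit from the disable/enable counter, so the two mechanisms
 * do not undo each other.
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);
 *	(writes still fail: RB_BUFFER_OFF is still set)
 *	ring_buffer_record_on(buffer);
 *	(writes succeed again: the counter is back to zero)
 */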
3259
/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
3266bool ring_buffer_record_is_on(struct ring_buffer *buffer)
3267{
3268 return !atomic_read(&buffer->record_disabled);
3269}
3270
/**
 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
 * @buffer: The ring buffer to see if write is set enabled
 *
 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
 * Note that this does NOT mean it is in a writable state.
 *
 * It may return true when the ring buffer has been disabled by
 * ring_buffer_record_disable(), as that is a temporary disabling of
 * the ring buffer.
 */
3282bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3283{
3284 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3285}
3286
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_rcu() after this.
 */
3297void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3298{
3299 struct ring_buffer_per_cpu *cpu_buffer;
3300
3301 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3302 return;
3303
3304 cpu_buffer = buffer->buffers[cpu];
3305 atomic_inc(&cpu_buffer->record_disabled);
3306}
3307EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3308
/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
3317void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3318{
3319 struct ring_buffer_per_cpu *cpu_buffer;
3320
3321 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3322 return;
3323
3324 cpu_buffer = buffer->buffers[cpu];
3325 atomic_dec(&cpu_buffer->record_disabled);
3326}
3327EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3328
/*
 * The total entries in the ring buffer is the running counter
 * of entries entered into the ring buffer, minus the sum of
 * the entries read from the ring buffer and the number of
 * entries that were overwritten.
 */
3335static inline unsigned long
3336rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3337{
3338 return local_read(&cpu_buffer->entries) -
3339 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3340}
3341
/**
 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
3347u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3348{
3349 unsigned long flags;
3350 struct ring_buffer_per_cpu *cpu_buffer;
3351 struct buffer_page *bpage;
3352 u64 ret = 0;
3353
3354 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3355 return 0;
3356
3357 cpu_buffer = buffer->buffers[cpu];
3358 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3359
 /*
  * if the tail is on reader_page, oldest time stamp is on the reader
  * page
  */
3363 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3364 bpage = cpu_buffer->reader_page;
3365 else
3366 bpage = rb_set_head_page(cpu_buffer);
3367 if (bpage)
3368 ret = bpage->page->time_stamp;
3369 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3370
3371 return ret;
3372}
3373EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3374
/**
 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
3380unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3381{
3382 struct ring_buffer_per_cpu *cpu_buffer;
3383 unsigned long ret;
3384
3385 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3386 return 0;
3387
3388 cpu_buffer = buffer->buffers[cpu];
3389 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3390
3391 return ret;
3392}
3393EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3394
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
3400unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3401{
3402 struct ring_buffer_per_cpu *cpu_buffer;
3403
3404 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3405 return 0;
3406
3407 cpu_buffer = buffer->buffers[cpu];
3408
3409 return rb_num_of_entries(cpu_buffer);
3410}
3411EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3412
/**
 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
 * buffer wrapping around (only if RB_FL_OVERWRITE is on)
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
3419unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3420{
3421 struct ring_buffer_per_cpu *cpu_buffer;
3422 unsigned long ret;
3423
3424 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3425 return 0;
3426
3427 cpu_buffer = buffer->buffers[cpu];
3428 ret = local_read(&cpu_buffer->overrun);
3429
3430 return ret;
3431}
3432EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3433
/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
 * commits failing due to the buffer wrapping around while there are uncommitted
 * events, such as during an interrupt storm.
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
3441unsigned long
3442ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3443{
3444 struct ring_buffer_per_cpu *cpu_buffer;
3445 unsigned long ret;
3446
3447 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3448 return 0;
3449
3450 cpu_buffer = buffer->buffers[cpu];
3451 ret = local_read(&cpu_buffer->commit_overrun);
3452
3453 return ret;
3454}
3455EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3456
/**
 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped events from
 */
3463unsigned long
3464ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3465{
3466 struct ring_buffer_per_cpu *cpu_buffer;
3467 unsigned long ret;
3468
3469 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3470 return 0;
3471
3472 cpu_buffer = buffer->buffers[cpu];
3473 ret = local_read(&cpu_buffer->dropped_events);
3474
3475 return ret;
3476}
3477EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3478
/**
 * ring_buffer_read_events_cpu - get the number of events successfully read
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of events read
 */
3484unsigned long
3485ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3486{
3487 struct ring_buffer_per_cpu *cpu_buffer;
3488
3489 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3490 return 0;
3491
3492 cpu_buffer = buffer->buffers[cpu];
3493 return cpu_buffer->read;
3494}
3495EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3496
/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
3504unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3505{
3506 struct ring_buffer_per_cpu *cpu_buffer;
3507 unsigned long entries = 0;
3508 int cpu;
3509
 /* if you care about this being correct, lock the buffer */
3511 for_each_buffer_cpu(buffer, cpu) {
3512 cpu_buffer = buffer->buffers[cpu];
3513 entries += rb_num_of_entries(cpu_buffer);
3514 }
3515
3516 return entries;
3517}
3518EXPORT_SYMBOL_GPL(ring_buffer_entries);
3519
/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
3527unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3528{
3529 struct ring_buffer_per_cpu *cpu_buffer;
3530 unsigned long overruns = 0;
3531 int cpu;
3532
 /* if you care about this being correct, lock the buffer */
3534 for_each_buffer_cpu(buffer, cpu) {
3535 cpu_buffer = buffer->buffers[cpu];
3536 overruns += local_read(&cpu_buffer->overrun);
3537 }
3538
3539 return overruns;
3540}
3541EXPORT_SYMBOL_GPL(ring_buffer_overruns);
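
/*
 * Illustrative sketch (not part of this file): dumping the per-cpu
 * statistics exported above. The pr_info() formatting is arbitrary.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		pr_info("cpu %d: entries=%lu overrun=%lu dropped=%lu read=%lu\n",
 *			cpu,
 *			ring_buffer_entries_cpu(buffer, cpu),
 *			ring_buffer_overrun_cpu(buffer, cpu),
 *			ring_buffer_dropped_events_cpu(buffer, cpu),
 *			ring_buffer_read_events_cpu(buffer, cpu));
 *	}
 */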
3542
3543static void rb_iter_reset(struct ring_buffer_iter *iter)
3544{
3545 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3546
 /* Iterator usage is expected to have record disabled */
3548 iter->head_page = cpu_buffer->reader_page;
3549 iter->head = cpu_buffer->reader_page->read;
3550
3551 iter->cache_reader_page = iter->head_page;
3552 iter->cache_read = cpu_buffer->read;
3553
3554 if (iter->head)
3555 iter->read_stamp = cpu_buffer->read_stamp;
3556 else
3557 iter->read_stamp = iter->head_page->page->time_stamp;
3558}
3559
/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
3567void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3568{
3569 struct ring_buffer_per_cpu *cpu_buffer;
3570 unsigned long flags;
3571
3572 if (!iter)
3573 return;
3574
3575 cpu_buffer = iter->cpu_buffer;
3576
3577 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3578 rb_iter_reset(iter);
3579 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3580}
3581EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3582
/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
3587int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3588{
3589 struct ring_buffer_per_cpu *cpu_buffer;
3590 struct buffer_page *reader;
3591 struct buffer_page *head_page;
3592 struct buffer_page *commit_page;
3593 unsigned commit;
3594
3595 cpu_buffer = iter->cpu_buffer;
3596
 /* Remember, trace recording is off when iterator is in use */
3598 reader = cpu_buffer->reader_page;
3599 head_page = cpu_buffer->head_page;
3600 commit_page = cpu_buffer->commit_page;
3601 commit = rb_page_commit(commit_page);
3602
3603 return ((iter->head_page == commit_page && iter->head == commit) ||
3604 (iter->head_page == reader && commit_page == head_page &&
3605 head_page->read == commit &&
3606 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3607}
3608EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3609
3610static void
3611rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3612 struct ring_buffer_event *event)
3613{
3614 u64 delta;
3615
3616 switch (event->type_len) {
3617 case RINGBUF_TYPE_PADDING:
3618 return;
3619
3620 case RINGBUF_TYPE_TIME_EXTEND:
3621 delta = ring_buffer_event_time_stamp(event);
3622 cpu_buffer->read_stamp += delta;
3623 return;
3624
3625 case RINGBUF_TYPE_TIME_STAMP:
3626 delta = ring_buffer_event_time_stamp(event);
3627 cpu_buffer->read_stamp = delta;
3628 return;
3629
3630 case RINGBUF_TYPE_DATA:
3631 cpu_buffer->read_stamp += event->time_delta;
3632 return;
3633
3634 default:
3635 BUG();
3636 }
3637 return;
3638}
3639
3640static void
3641rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3642 struct ring_buffer_event *event)
3643{
3644 u64 delta;
3645
3646 switch (event->type_len) {
3647 case RINGBUF_TYPE_PADDING:
3648 return;
3649
3650 case RINGBUF_TYPE_TIME_EXTEND:
3651 delta = ring_buffer_event_time_stamp(event);
3652 iter->read_stamp += delta;
3653 return;
3654
3655 case RINGBUF_TYPE_TIME_STAMP:
3656 delta = ring_buffer_event_time_stamp(event);
3657 iter->read_stamp = delta;
3658 return;
3659
3660 case RINGBUF_TYPE_DATA:
3661 iter->read_stamp += event->time_delta;
3662 return;
3663
3664 default:
3665 BUG();
3666 }
3667 return;
3668}
3669
3670static struct buffer_page *
3671rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3672{
3673 struct buffer_page *reader = NULL;
3674 unsigned long overwrite;
3675 unsigned long flags;
3676 int nr_loops = 0;
3677 int ret;
3678
3679 local_irq_save(flags);
3680 arch_spin_lock(&cpu_buffer->lock);
3681
3682 again:
 /*
  * This should normally only loop twice. But because the
  * start of the reader inserts an empty page, it causes
  * a case where we will loop three times. There should be no
  * reason to loop four times (that I know of).
  */
3689 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3690 reader = NULL;
3691 goto out;
3692 }
3693
3694 reader = cpu_buffer->reader_page;
3695
 /* If there's more to read, return this page */
3697 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3698 goto out;
3699
 /* Never should allow this to happen */
3701 if (RB_WARN_ON(cpu_buffer,
3702 cpu_buffer->reader_page->read > rb_page_size(reader)))
3703 goto out;
3704
 /* check if we caught up to the tail */
3706 reader = NULL;
3707 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3708 goto out;
3709
 /* Don't bother swapping if the ring buffer is empty */
3711 if (rb_num_of_entries(cpu_buffer) == 0)
3712 goto out;
3713
 /*
  * Reset the reader page to size zero.
  */
3717 local_set(&cpu_buffer->reader_page->write, 0);
3718 local_set(&cpu_buffer->reader_page->entries, 0);
3719 local_set(&cpu_buffer->reader_page->page->commit, 0);
3720 cpu_buffer->reader_page->real_end = 0;
3721
3722 spin:
 /*
  * Splice the empty reader page into the list around the head.
  */
3726 reader = rb_set_head_page(cpu_buffer);
3727 if (!reader)
3728 goto out;
3729 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3730 cpu_buffer->reader_page->list.prev = reader->list.prev;
3731
 /*
  * cpu_buffer->pages just needs to point to the buffer, it
  *  has no specific buffer page to point to. Lets move it out
  *  of our way so we don't accidentally swap it.
  */
3737 cpu_buffer->pages = reader->list.prev;
3738
 /* The reader page will be pointing to the new head */
3740 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3741
 /*
  * We want to make sure we read the overruns after we set up our
  * pointers to the next object. The writer side does a
  * cmpxchg to cross pages which acts as the mb on the writer
  * side. Note, the reader will constantly fail the swap
  * while the writer is updating the pointers, so this
  * guarantees that the overwrite recorded here is the one we
  * are reading from the buffer, not the one that was
  * overwritten.
  */
3751 smp_mb();
3752 overwrite = local_read(&(cpu_buffer->overrun));
3753
 /*
  * Here's the tricky part.
  *
  * We need to move the pointer past the header page.
  * But we can only do that if a writer is not currently
  * moving it. The page before the header page has the
  * flag bit '1' set if it is pointing to the page we want.
  * But if the writer is in the process of moving it
  * then it will be '2' or already moved '0'.
  */
3765 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3766
 /*
  * If we did not convert it, then we must try again.
  */
3770 if (!ret)
3771 goto spin;
3772
 /*
  * Yay! We succeeded in replacing the page.
  *
  * Now make the new head point back to the reader page.
  */
3778 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3779 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3780
3781 local_inc(&cpu_buffer->pages_read);
3782
 /* Finally update the reader page to the new head */
3784 cpu_buffer->reader_page = reader;
3785 cpu_buffer->reader_page->read = 0;
3786
3787 if (overwrite != cpu_buffer->last_overrun) {
3788 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3789 cpu_buffer->last_overrun = overwrite;
3790 }
3791
3792 goto again;
3793
3794 out:
 /* Update the read_stamp on the first event */
3796 if (reader && reader->read == 0)
3797 cpu_buffer->read_stamp = reader->page->time_stamp;
3798
3799 arch_spin_unlock(&cpu_buffer->lock);
3800 local_irq_restore(flags);
3801
3802 return reader;
3803}
3804
3805static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3806{
3807 struct ring_buffer_event *event;
3808 struct buffer_page *reader;
3809 unsigned length;
3810
3811 reader = rb_get_reader_page(cpu_buffer);
3812
 /* This function should not be called when buffer is empty */
3814 if (RB_WARN_ON(cpu_buffer, !reader))
3815 return;
3816
3817 event = rb_reader_event(cpu_buffer);
3818
3819 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3820 cpu_buffer->read++;
3821
3822 rb_update_read_stamp(cpu_buffer, event);
3823
3824 length = rb_event_length(event);
3825 cpu_buffer->reader_page->read += length;
3826}
3827
3828static void rb_advance_iter(struct ring_buffer_iter *iter)
3829{
3830 struct ring_buffer_per_cpu *cpu_buffer;
3831 struct ring_buffer_event *event;
3832 unsigned length;
3833
3834 cpu_buffer = iter->cpu_buffer;
3835
 /*
  * Check if we are at the end of the buffer.
  */
3839 if (iter->head >= rb_page_size(iter->head_page)) {
 /* discarded commits can make the page empty */
3841 if (iter->head_page == cpu_buffer->commit_page)
3842 return;
3843 rb_inc_iter(iter);
3844 return;
3845 }
3846
3847 event = rb_iter_head_event(iter);
3848
3849 length = rb_event_length(event);
3850
 /*
  * This should not be called to advance the header if we are
  * at the tail of the buffer.
  */
3855 if (RB_WARN_ON(cpu_buffer,
3856 (iter->head_page == cpu_buffer->commit_page) &&
3857 (iter->head + length > rb_commit_index(cpu_buffer))))
3858 return;
3859
3860 rb_update_iter_read_stamp(iter, event);
3861
3862 iter->head += length;
3863
 /* check for end of page padding */
3865 if ((iter->head >= rb_page_size(iter->head_page)) &&
3866 (iter->head_page != cpu_buffer->commit_page))
3867 rb_inc_iter(iter);
3868}
3869
3870static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3871{
3872 return cpu_buffer->lost_events;
3873}
3874
3875static struct ring_buffer_event *
3876rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3877 unsigned long *lost_events)
3878{
3879 struct ring_buffer_event *event;
3880 struct buffer_page *reader;
3881 int nr_loops = 0;
3882
3883 if (ts)
3884 *ts = 0;
3885 again:
 /*
  * We repeat when a time extend is encountered.
  * Since the time extend is always attached to a data event,
  * we should never loop more than once.
  * (We never hit the following condition more than twice).
  */
3892 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3893 return NULL;
3894
3895 reader = rb_get_reader_page(cpu_buffer);
3896 if (!reader)
3897 return NULL;
3898
3899 event = rb_reader_event(cpu_buffer);
3900
3901 switch (event->type_len) {
3902 case RINGBUF_TYPE_PADDING:
3903 if (rb_null_event(event))
3904 RB_WARN_ON(cpu_buffer, 1);
 /*
  * Because the writer could be discarding every
  * event it creates (which would probably be bad)
  * if we were to go back to "again" then we may never
  * catch up, and will trigger the warn on, or lock
  * the box. Return the padding, and we will release
  * the current locks, and try again.
  */
3913 return event;
3914
3915 case RINGBUF_TYPE_TIME_EXTEND:
 /* Internal data, OK to advance */
3917 rb_advance_reader(cpu_buffer);
3918 goto again;
3919
3920 case RINGBUF_TYPE_TIME_STAMP:
3921 if (ts) {
3922 *ts = ring_buffer_event_time_stamp(event);
3923 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3924 cpu_buffer->cpu, ts);
3925 }
3926
3927 rb_advance_reader(cpu_buffer);
3928 goto again;
3929
3930 case RINGBUF_TYPE_DATA:
3931 if (ts && !(*ts)) {
3932 *ts = cpu_buffer->read_stamp + event->time_delta;
3933 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3934 cpu_buffer->cpu, ts);
3935 }
3936 if (lost_events)
3937 *lost_events = rb_lost_events(cpu_buffer);
3938 return event;
3939
3940 default:
3941 BUG();
3942 }
3943
3944 return NULL;
3945}
3946EXPORT_SYMBOL_GPL(ring_buffer_peek);
3947
3948static struct ring_buffer_event *
3949rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3950{
3951 struct ring_buffer *buffer;
3952 struct ring_buffer_per_cpu *cpu_buffer;
3953 struct ring_buffer_event *event;
3954 int nr_loops = 0;
3955
3956 if (ts)
3957 *ts = 0;
3958
3959 cpu_buffer = iter->cpu_buffer;
3960 buffer = cpu_buffer->buffer;
3961
 /*
  * Check if someone performed a consuming read to
  * the buffer. A consuming read invalidates the iterator
  * and we need to reset the iterator in this case.
  */
3967 if (unlikely(iter->cache_read != cpu_buffer->read ||
3968 iter->cache_reader_page != cpu_buffer->reader_page))
3969 rb_iter_reset(iter);
3970
3971 again:
3972 if (ring_buffer_iter_empty(iter))
3973 return NULL;
3974
 /*
  * We repeat when a time extend is encountered or we hit
  * the end of the page. Since the time extend is always attached
  * to a data event, we should never loop more than three times.
  * Once for going to next page, once on time extend, and
  * finally once to get the event.
  * (We never hit the following condition more than thrice).
  */
3983 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3984 return NULL;
3985
3986 if (rb_per_cpu_empty(cpu_buffer))
3987 return NULL;
3988
3989 if (iter->head >= rb_page_size(iter->head_page)) {
3990 rb_inc_iter(iter);
3991 goto again;
3992 }
3993
3994 event = rb_iter_head_event(iter);
3995
3996 switch (event->type_len) {
3997 case RINGBUF_TYPE_PADDING:
3998 if (rb_null_event(event)) {
3999 rb_inc_iter(iter);
4000 goto again;
4001 }
4002 rb_advance_iter(iter);
4003 return event;
4004
4005 case RINGBUF_TYPE_TIME_EXTEND:
 /* Internal data, OK to advance */
4007 rb_advance_iter(iter);
4008 goto again;
4009
4010 case RINGBUF_TYPE_TIME_STAMP:
4011 if (ts) {
4012 *ts = ring_buffer_event_time_stamp(event);
4013 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4014 cpu_buffer->cpu, ts);
4015 }
4016
4017 rb_advance_iter(iter);
4018 goto again;
4019
4020 case RINGBUF_TYPE_DATA:
4021 if (ts && !(*ts)) {
4022 *ts = iter->read_stamp + event->time_delta;
4023 ring_buffer_normalize_time_stamp(buffer,
4024 cpu_buffer->cpu, ts);
4025 }
4026 return event;
4027
4028 default:
4029 BUG();
4030 }
4031
4032 return NULL;
4033}
4034EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4035
4036static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4037{
4038 if (likely(!in_nmi())) {
4039 raw_spin_lock(&cpu_buffer->reader_lock);
4040 return true;
4041 }
4042
 /*
  * If an NMI die dumps out the content of the ring buffer
  * trylock must be used to prevent a deadlock if the NMI
  * preempted a task that holds the ring buffer locks. If
  * we get the lock then all is fine, if not, then continue
  * to do the read, but this can corrupt the ring buffer,
  * so it must be permanently disabled from future writes.
  * Reading from NMI is a oneshot deal.
  */
4052 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4053 return true;
4054
 /* Continue without locking, but disable the ring buffer */
4056 atomic_inc(&cpu_buffer->record_disabled);
4057 return false;
4058}
4059
4060static inline void
4061rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4062{
4063 if (likely(locked))
4064 raw_spin_unlock(&cpu_buffer->reader_lock);
4065 return;
4066}
4067
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
4078struct ring_buffer_event *
4079ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
4080 unsigned long *lost_events)
4081{
4082 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4083 struct ring_buffer_event *event;
4084 unsigned long flags;
4085 bool dolock;
4086
4087 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4088 return NULL;
4089
4090 again:
4091 local_irq_save(flags);
4092 dolock = rb_reader_lock(cpu_buffer);
4093 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4094 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4095 rb_advance_reader(cpu_buffer);
4096 rb_reader_unlock(cpu_buffer, dolock);
4097 local_irq_restore(flags);
4098
4099 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4100 goto again;
4101
4102 return event;
4103}
4104
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
4113struct ring_buffer_event *
4114ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4115{
4116 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4117 struct ring_buffer_event *event;
4118 unsigned long flags;
4119
4120 again:
4121 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4122 event = rb_iter_peek(iter, ts);
4123 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4124
4125 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4126 goto again;
4127
4128 return event;
4129}
4130
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
4142struct ring_buffer_event *
4143ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
4144 unsigned long *lost_events)
4145{
4146 struct ring_buffer_per_cpu *cpu_buffer;
4147 struct ring_buffer_event *event = NULL;
4148 unsigned long flags;
4149 bool dolock;
4150
4151 again:
 /* might be called in atomic */
4153 preempt_disable();
4154
4155 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4156 goto out;
4157
4158 cpu_buffer = buffer->buffers[cpu];
4159 local_irq_save(flags);
4160 dolock = rb_reader_lock(cpu_buffer);
4161
4162 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4163 if (event) {
4164 cpu_buffer->lost_events = 0;
4165 rb_advance_reader(cpu_buffer);
4166 }
4167
4168 rb_reader_unlock(cpu_buffer, dolock);
4169 local_irq_restore(flags);
4170
4171 out:
4172 preempt_enable();
4173
4174 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4175 goto again;
4176
4177 return event;
4178}
4179EXPORT_SYMBOL_GPL(ring_buffer_consume);
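
/*
 * Illustrative sketch (not part of this file): draining one cpu's
 * events with the consuming reader. The process() helper is
 * hypothetical.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_info("lost %lu events\n", lost);
 *		process(ring_buffer_event_data(event), ts);
 *	}
 */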
4180
/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 * @flags: gfp flags to use for memory allocation
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
4202struct ring_buffer_iter *
4203ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
4204{
4205 struct ring_buffer_per_cpu *cpu_buffer;
4206 struct ring_buffer_iter *iter;
4207
4208 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4209 return NULL;
4210
4211 iter = kmalloc(sizeof(*iter), flags);
4212 if (!iter)
4213 return NULL;
4214
4215 cpu_buffer = buffer->buffers[cpu];
4216
4217 iter->cpu_buffer = cpu_buffer;
4218
4219 atomic_inc(&buffer->resize_disabled);
4220 atomic_inc(&cpu_buffer->record_disabled);
4221
4222 return iter;
4223}
4224EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4225
/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
4233void
4234ring_buffer_read_prepare_sync(void)
4235{
4236 synchronize_rcu();
4237}
4238EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4239
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
4251void
4252ring_buffer_read_start(struct ring_buffer_iter *iter)
4253{
4254 struct ring_buffer_per_cpu *cpu_buffer;
4255 unsigned long flags;
4256
4257 if (!iter)
4258 return;
4259
4260 cpu_buffer = iter->cpu_buffer;
4261
4262 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4263 arch_spin_lock(&cpu_buffer->lock);
4264 rb_iter_reset(iter);
4265 arch_spin_unlock(&cpu_buffer->lock);
4266 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4267}
4268EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4269
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
4277void
4278ring_buffer_read_finish(struct ring_buffer_iter *iter)
4279{
4280 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4281 unsigned long flags;
4282
 /*
  * Ring buffer is disabled from recording, here's a good place
  * to check the integrity of the ring buffer.
  * Must prevent readers from trying to read, as the check
  * clears the HEAD page and readers require it.
  */
4289 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4290 rb_check_pages(cpu_buffer);
4291 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4292
4293 atomic_dec(&cpu_buffer->record_disabled);
4294 atomic_dec(&cpu_buffer->buffer->resize_disabled);
4295 kfree(iter);
4296}
4297EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
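
/*
 * Illustrative sketch (not part of this file): the full non-consuming
 * iterator protocol spelled out by the functions above. The process()
 * helper is hypothetical.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, NULL)))
 *		process(ring_buffer_event_data(event));
 *	ring_buffer_read_finish(iter);
 */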
4298
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
4306struct ring_buffer_event *
4307ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4308{
4309 struct ring_buffer_event *event;
4310 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4311 unsigned long flags;
4312
4313 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4314 again:
4315 event = rb_iter_peek(iter, ts);
4316 if (!event)
4317 goto out;
4318
4319 if (event->type_len == RINGBUF_TYPE_PADDING)
4320 goto again;
4321
4322 rb_advance_iter(iter);
4323 out:
4324 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4325
4326 return event;
4327}
4328EXPORT_SYMBOL_GPL(ring_buffer_read);
4329
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
4334unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4335{
 /*
  * Earlier, this method returned
  *	BUF_PAGE_SIZE * buffer->nr_pages
  * Since the nr_pages field is now removed, we have converted this to
  * return the per cpu buffer size.
  */
4342 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4343 return 0;
4344
4345 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4346}
4347EXPORT_SYMBOL_GPL(ring_buffer_size);
4348
4349static void
4350rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4351{
4352 rb_head_page_deactivate(cpu_buffer);
4353
4354 cpu_buffer->head_page
4355 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4356 local_set(&cpu_buffer->head_page->write, 0);
4357 local_set(&cpu_buffer->head_page->entries, 0);
4358 local_set(&cpu_buffer->head_page->page->commit, 0);
4359
4360 cpu_buffer->head_page->read = 0;
4361
4362 cpu_buffer->tail_page = cpu_buffer->head_page;
4363 cpu_buffer->commit_page = cpu_buffer->head_page;
4364
4365 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4366 INIT_LIST_HEAD(&cpu_buffer->new_pages);
4367 local_set(&cpu_buffer->reader_page->write, 0);
4368 local_set(&cpu_buffer->reader_page->entries, 0);
4369 local_set(&cpu_buffer->reader_page->page->commit, 0);
4370 cpu_buffer->reader_page->read = 0;
4371
4372 local_set(&cpu_buffer->entries_bytes, 0);
4373 local_set(&cpu_buffer->overrun, 0);
4374 local_set(&cpu_buffer->commit_overrun, 0);
4375 local_set(&cpu_buffer->dropped_events, 0);
4376 local_set(&cpu_buffer->entries, 0);
4377 local_set(&cpu_buffer->committing, 0);
4378 local_set(&cpu_buffer->commits, 0);
4379 local_set(&cpu_buffer->pages_touched, 0);
4380 local_set(&cpu_buffer->pages_read, 0);
4381 cpu_buffer->last_pages_touch = 0;
4382 cpu_buffer->shortest_full = 0;
4383 cpu_buffer->read = 0;
4384 cpu_buffer->read_bytes = 0;
4385
4386 cpu_buffer->write_stamp = 0;
4387 cpu_buffer->read_stamp = 0;
4388
4389 cpu_buffer->lost_events = 0;
4390 cpu_buffer->last_overrun = 0;
4391
4392 rb_head_page_activate(cpu_buffer);
4393}
4394
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
4400void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4401{
4402 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4403 unsigned long flags;
4404
4405 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4406 return;
4407
4408 atomic_inc(&buffer->resize_disabled);
4409 atomic_inc(&cpu_buffer->record_disabled);
4410
 /* Make sure all commits have finished */
4412 synchronize_rcu();
4413
4414 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4415
4416 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4417 goto out;
4418
4419 arch_spin_lock(&cpu_buffer->lock);
4420
4421 rb_reset_cpu(cpu_buffer);
4422
4423 arch_spin_unlock(&cpu_buffer->lock);
4424
4425 out:
4426 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4427
4428 atomic_dec(&cpu_buffer->record_disabled);
4429 atomic_dec(&buffer->resize_disabled);
4430}
4431EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4432
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
4437void ring_buffer_reset(struct ring_buffer *buffer)
4438{
4439 int cpu;
4440
4441 for_each_buffer_cpu(buffer, cpu)
4442 ring_buffer_reset_cpu(buffer, cpu);
4443}
4444EXPORT_SYMBOL_GPL(ring_buffer_reset);
4445
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
4450bool ring_buffer_empty(struct ring_buffer *buffer)
4451{
4452 struct ring_buffer_per_cpu *cpu_buffer;
4453 unsigned long flags;
4454 bool dolock;
4455 int cpu;
4456 int ret;
4457
 /* yes this is racy, but if you don't like the race, lock the buffer */
4459 for_each_buffer_cpu(buffer, cpu) {
4460 cpu_buffer = buffer->buffers[cpu];
4461 local_irq_save(flags);
4462 dolock = rb_reader_lock(cpu_buffer);
4463 ret = rb_per_cpu_empty(cpu_buffer);
4464 rb_reader_unlock(cpu_buffer, dolock);
4465 local_irq_restore(flags);
4466
4467 if (!ret)
4468 return false;
4469 }
4470
4471 return true;
4472}
4473EXPORT_SYMBOL_GPL(ring_buffer_empty);
4474
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
4480bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4481{
4482 struct ring_buffer_per_cpu *cpu_buffer;
4483 unsigned long flags;
4484 bool dolock;
4485 int ret;
4486
4487 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4488 return true;
4489
4490 cpu_buffer = buffer->buffers[cpu];
4491 local_irq_save(flags);
4492 dolock = rb_reader_lock(cpu_buffer);
4493 ret = rb_per_cpu_empty(cpu_buffer);
4494 rb_reader_unlock(cpu_buffer, dolock);
4495 local_irq_restore(flags);
4496
4497 return ret;
4498}
4499EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4500
4501#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another back up buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
4512int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4513 struct ring_buffer *buffer_b, int cpu)
4514{
4515 struct ring_buffer_per_cpu *cpu_buffer_a;
4516 struct ring_buffer_per_cpu *cpu_buffer_b;
4517 int ret = -EINVAL;
4518
4519 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4520 !cpumask_test_cpu(cpu, buffer_b->cpumask))
4521 goto out;
4522
4523 cpu_buffer_a = buffer_a->buffers[cpu];
4524 cpu_buffer_b = buffer_b->buffers[cpu];
4525
 /* At least make sure the two buffers are somewhat the same */
4527 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4528 goto out;
4529
4530 ret = -EAGAIN;
4531
4532 if (atomic_read(&buffer_a->record_disabled))
4533 goto out;
4534
4535 if (atomic_read(&buffer_b->record_disabled))
4536 goto out;
4537
4538 if (atomic_read(&cpu_buffer_a->record_disabled))
4539 goto out;
4540
4541 if (atomic_read(&cpu_buffer_b->record_disabled))
4542 goto out;
4543
 /*
  * We can't do a synchronize_rcu here because this
  * function can be called in atomic context.
  * Normally this will be called from the same CPU as cpu.
  * If not it's up to the caller to protect this.
  */
4550 atomic_inc(&cpu_buffer_a->record_disabled);
4551 atomic_inc(&cpu_buffer_b->record_disabled);
4552
4553 ret = -EBUSY;
4554 if (local_read(&cpu_buffer_a->committing))
4555 goto out_dec;
4556 if (local_read(&cpu_buffer_b->committing))
4557 goto out_dec;
4558
4559 buffer_a->buffers[cpu] = cpu_buffer_b;
4560 buffer_b->buffers[cpu] = cpu_buffer_a;
4561
4562 cpu_buffer_b->buffer = buffer_a;
4563 cpu_buffer_a->buffer = buffer_b;
4564
4565 ret = 0;
4566
4567out_dec:
4568 atomic_dec(&cpu_buffer_a->record_disabled);
4569 atomic_dec(&cpu_buffer_b->record_disabled);
4570out:
4571 return ret;
4572}
4573EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
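
/*
 * Illustrative sketch (not part of this file): how a latency tracer
 * might snapshot a cpu by swapping its live buffer with a spare "max"
 * buffer. Both buffer names and process_snapshot() are hypothetical.
 *
 *	if (ring_buffer_swap_cpu(max_buffer, live_buffer, cpu) == 0)
 *		process_snapshot(max_buffer, cpu);
 */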
4574#endif
4575
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or ERR_PTR
 */
4592void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4593{
4594 struct ring_buffer_per_cpu *cpu_buffer;
4595 struct buffer_data_page *bpage = NULL;
4596 unsigned long flags;
4597 struct page *page;
4598
4599 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4600 return ERR_PTR(-ENODEV);
4601
4602 cpu_buffer = buffer->buffers[cpu];
4603 local_irq_save(flags);
4604 arch_spin_lock(&cpu_buffer->lock);
4605
4606 if (cpu_buffer->free_page) {
4607 bpage = cpu_buffer->free_page;
4608 cpu_buffer->free_page = NULL;
4609 }
4610
4611 arch_spin_unlock(&cpu_buffer->lock);
4612 local_irq_restore(flags);
4613
4614 if (bpage)
4615 goto out;
4616
4617 page = alloc_pages_node(cpu_to_node(cpu),
4618 GFP_KERNEL | __GFP_NORETRY, 0);
4619 if (!page)
4620 return ERR_PTR(-ENOMEM);
4621
4622 bpage = page_address(page);
4623
4624 out:
4625 rb_init_page(bpage);
4626
4627 return bpage;
4628}
4629EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4630
/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @cpu: the cpu buffer the page came from
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
4639void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4640{
4641 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4642 struct buffer_data_page *bpage = data;
4643 struct page *page = virt_to_page(bpage);
4644 unsigned long flags;
4645
 /* If the page is still in use someplace else, we can't reuse it */
4647 if (page_ref_count(page) > 1)
4648 goto out;
4649
4650 local_irq_save(flags);
4651 arch_spin_lock(&cpu_buffer->lock);
4652
4653 if (!cpu_buffer->free_page) {
4654 cpu_buffer->free_page = bpage;
4655 bpage = NULL;
4656 }
4657
4658 arch_spin_unlock(&cpu_buffer->lock);
4659 local_irq_restore(flags);
4660
4661 out:
4662 free_page((unsigned long)bpage);
4663}
4664EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4665
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the extraction only happens when the writer
 * is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  rely on any system calls, or locks.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
4699int ring_buffer_read_page(struct ring_buffer *buffer,
4700 void **data_page, size_t len, int cpu, int full)
4701{
4702 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4703 struct ring_buffer_event *event;
4704 struct buffer_data_page *bpage;
4705 struct buffer_page *reader;
4706 unsigned long missed_events;
4707 unsigned long flags;
4708 unsigned int commit;
4709 unsigned int read;
4710 u64 save_timestamp;
4711 int ret = -1;
4712
4713 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4714 goto out;
4715
 /*
  * If len is not big enough to hold the page header, then
  * we can not copy anything.
  */
4720 if (len <= BUF_PAGE_HDR_SIZE)
4721 goto out;
4722
4723 len -= BUF_PAGE_HDR_SIZE;
4724
4725 if (!data_page)
4726 goto out;
4727
4728 bpage = *data_page;
4729 if (!bpage)
4730 goto out;
4731
4732 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4733
4734 reader = rb_get_reader_page(cpu_buffer);
4735 if (!reader)
4736 goto out_unlock;
4737
4738 event = rb_reader_event(cpu_buffer);
4739
4740 read = reader->read;
4741 commit = rb_page_commit(reader);
4742
 /* Check if any events were dropped */
4744 missed_events = cpu_buffer->lost_events;
4745
 /*
  * If this page has been partially read or
  * if len is not big enough to read the rest of the page or
  * a writer is still on the page, then
  * we must copy the data from the page to the buffer.
  * Otherwise, we can simply swap the page with the one passed in.
  */
4753 if (read || (len < (commit - read)) ||
4754 cpu_buffer->reader_page == cpu_buffer->commit_page) {
4755 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4756 unsigned int rpos = read;
4757 unsigned int pos = 0;
4758 unsigned int size;
4759
4760 if (full)
4761 goto out_unlock;
4762
4763 if (len > (commit - read))
4764 len = (commit - read);
4765
 /* Always keep the time extend and data together */
4767 size = rb_event_ts_length(event);
4768
4769 if (len < size)
4770 goto out_unlock;
4771
 /* save the current timestamp, since the user will need it */
4773 save_timestamp = cpu_buffer->read_stamp;
4774
 /* Need to copy one event at a time */
4776 do {
 /* We need the size of one event, because
  * rb_advance_reader only advances by one event,
  * whereas rb_event_ts_length may include the size of
  * one or two events.
  * We have already ensured there's enough space if this
  * is a time extend. */
4783 size = rb_event_length(event);
4784 memcpy(bpage->data + pos, rpage->data + rpos, size);
4785
4786 len -= size;
4787
4788 rb_advance_reader(cpu_buffer);
4789 rpos = reader->read;
4790 pos += size;
4791
4792 if (rpos >= commit)
4793 break;
4794
4795 event = rb_reader_event(cpu_buffer);
 /* Always keep the time extend and data together */
4797 size = rb_event_ts_length(event);
4798 } while (len >= size);
4799
 /* update bpage */
4801 local_set(&bpage->commit, pos);
4802 bpage->time_stamp = save_timestamp;
4803
 /* we copied everything to the beginning */
4805 read = 0;
4806 } else {
 /* update the entry counter */
4808 cpu_buffer->read += rb_page_entries(reader);
4809 cpu_buffer->read_bytes += BUF_PAGE_SIZE;

 /* swap the pages */
4812 rb_init_page(bpage);
4813 bpage = reader->page;
4814 reader->page = *data_page;
4815 local_set(&reader->write, 0);
4816 local_set(&reader->entries, 0);
4817 reader->read = 0;
4818 *data_page = bpage;
4819
 /*
  * Use the real_end for the data size,
  * This gives us a chance to store the lost events
  * on the page.
  */
4825 if (reader->real_end)
4826 local_set(&bpage->commit, reader->real_end);
4827 }
4828 ret = read;
4829
4830 cpu_buffer->lost_events = 0;
4831
4832 commit = local_read(&bpage->commit);
4833
 /*
  * Set a flag in the commit field if we lost events
  */
4836 if (missed_events) {
 /* If there is room at the end of the page to save the
  * missed events, then record it there.
  */
4840 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4841 memcpy(&bpage->data[commit], &missed_events,
4842 sizeof(missed_events));
4843 local_add(RB_MISSED_STORED, &bpage->commit);
4844 commit += sizeof(missed_events);
4845 }
4846 local_add(RB_MISSED_EVENTS, &bpage->commit);
4847 }
4848
 /*
  * This page may be off to user land. Zero it out here.
  */
4852 if (commit < BUF_PAGE_SIZE)
4853 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4854
4855 out_unlock:
4856 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4857
4858 out:
4859 return ret;
4860}
4861EXPORT_SYMBOL_GPL(ring_buffer_read_page);
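
/*
 * Illustrative sketch (not part of this file): the full page-read
 * lifecycle. Note that ring_buffer_read_page() may swap the page that
 * rpage points to, which is why its address is passed in. The
 * process_page() helper is hypothetical.
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */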
4862
/*
 * We only allocate new buffers, never free them if the CPU goes down.
 * If we were to free the buffer, then the user would lose any trace that was in
 * the buffer.
 */
4868int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4869{
4870 struct ring_buffer *buffer;
4871 long nr_pages_same;
4872 int cpu_i;
4873 unsigned long nr_pages;
4874
4875 buffer = container_of(node, struct ring_buffer, node);
4876 if (cpumask_test_cpu(cpu, buffer->cpumask))
4877 return 0;
4878
4879 nr_pages = 0;
4880 nr_pages_same = 1;
4881
4882 for_each_buffer_cpu(buffer, cpu_i) {
 /* fill in the size from first enabled cpu */
4884 if (nr_pages == 0)
4885 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4886 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4887 nr_pages_same = 0;
4888 break;
4889 }
4890 }

 /* allocate minimum pages, user can later expand it */
4892 if (!nr_pages_same)
4893 nr_pages = 2;
4894 buffer->buffers[cpu] =
4895 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4896 if (!buffer->buffers[cpu]) {
4897 WARN(1, "failed to allocate ring buffer on CPU %u\n",
4898 cpu);
4899 return -ENOMEM;
4900 }
4901 smp_wmb();
4902 cpumask_set_cpu(cpu, buffer->cpumask);
4903 return 0;
4904}
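
/*
 * Illustrative sketch (not part of this file): the tracing core
 * registers this callback with the CPU hotplug state machine along
 * these lines (the exact call site lives in kernel/trace/trace.c):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 */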
4905
4906#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
4922static struct task_struct *rb_threads[NR_CPUS] __initdata;
4923
4924struct rb_test_data {
4925 struct ring_buffer *buffer;
4926 unsigned long events;
4927 unsigned long bytes_written;
4928 unsigned long bytes_alloc;
4929 unsigned long bytes_dropped;
4930 unsigned long events_nested;
4931 unsigned long bytes_written_nested;
4932 unsigned long bytes_alloc_nested;
4933 unsigned long bytes_dropped_nested;
4934 int min_size_nested;
4935 int max_size_nested;
4936 int max_size;
4937 int min_size;
4938 int cpu;
4939 int cnt;
4940};
4941
4942static struct rb_test_data rb_data[NR_CPUS] __initdata;
4943
/* 1 meg per cpu */
4945#define RB_TEST_BUFFER_SIZE 1048576
4946
4947static char rb_string[] __initdata =
4948 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4949 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4950 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4951
4952static bool rb_test_started __initdata;
4953
4954struct rb_item {
4955 int size;
4956 char str[];
4957};
4958
4959static __init int rb_write_something(struct rb_test_data *data, bool nested)
4960{
4961 struct ring_buffer_event *event;
4962 struct rb_item *item;
4963 bool started;
4964 int event_len;
4965 int size;
4966 int len;
4967 int cnt;
4968
 /* Have nested writes differ from what is written normally */
4970 cnt = data->cnt + (nested ? 27 : 0);
4971
 /* Multiply cnt by ~e, to make some unique increment */
4973 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4974
4975 len = size + sizeof(struct rb_item);
4976
4977 started = rb_test_started;
4978
4979 smp_rmb();
4980
4981 event = ring_buffer_lock_reserve(data->buffer, len);
4982 if (!event) {
 /* Ignore dropped events before test starts. */
 if (started) {
 if (nested)
 data->bytes_dropped_nested += len;
 else
 data->bytes_dropped += len;
4989 }
4990 return len;
4991 }
4992
4993 event_len = ring_buffer_event_length(event);
4994
4995 if (RB_WARN_ON(data->buffer, event_len < len))
4996 goto out;
4997
4998 item = ring_buffer_event_data(event);
4999 item->size = size;
5000 memcpy(item->str, rb_string, size);
5001
5002 if (nested) {
5003 data->bytes_alloc_nested += event_len;
5004 data->bytes_written_nested += len;
5005 data->events_nested++;
5006 if (!data->min_size_nested || len < data->min_size_nested)
5007 data->min_size_nested = len;
5008 if (len > data->max_size_nested)
5009 data->max_size_nested = len;
5010 } else {
5011 data->bytes_alloc += event_len;
5012 data->bytes_written += len;
5013 data->events++;
 if (!data->min_size || len < data->min_size)
 data->min_size = len;
5016 if (len > data->max_size)
5017 data->max_size = len;
5018 }
5019
5020 out:
5021 ring_buffer_unlock_commit(data->buffer, event);
5022
5023 return 0;
5024}
5025
5026static __init int rb_test(void *arg)
5027{
5028 struct rb_test_data *data = arg;
5029
5030 while (!kthread_should_stop()) {
5031 rb_write_something(data, false);
5032 data->cnt++;
5033
5034 set_current_state(TASK_INTERRUPTIBLE);
 /* Now sleep between a min of 100-300us and a max of 1ms */
5036 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5037 }
5038
5039 return 0;
5040}
5041
5042static __init void rb_ipi(void *ignore)
5043{
5044 struct rb_test_data *data;
5045 int cpu = smp_processor_id();
5046
5047 data = &rb_data[cpu];
5048 rb_write_something(data, true);
5049}
5050
5051static __init int rb_hammer_test(void *arg)
5052{
5053 while (!kthread_should_stop()) {
 /* Send an IPI to all cpus to write data! */
5056 smp_call_function(rb_ipi, NULL, 1);
 /* No sleep, but for non preempt, let others run */
5058 schedule();
5059 }
5060
5061 return 0;
5062}
5063
5064static __init int test_ringbuffer(void)
5065{
5066 struct task_struct *rb_hammer;
5067 struct ring_buffer *buffer;
5068 int cpu;
5069 int ret = 0;
5070
5071 pr_info("Running ring buffer tests...\n");
5072
5073 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5074 if (WARN_ON(!buffer))
5075 return 0;
5076
 /* Disable buffer so that threads can't write to it yet */
5078 ring_buffer_record_off(buffer);
5079
5080 for_each_online_cpu(cpu) {
5081 rb_data[cpu].buffer = buffer;
5082 rb_data[cpu].cpu = cpu;
5083 rb_data[cpu].cnt = cpu;
5084 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5085 "rbtester/%d", cpu);
5086 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5087 pr_cont("FAILED\n");
5088 ret = PTR_ERR(rb_threads[cpu]);
5089 goto out_free;
5090 }
5091
5092 kthread_bind(rb_threads[cpu], cpu);
5093 wake_up_process(rb_threads[cpu]);
5094 }
5095
 /* Now create the rb hammer! */
5097 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5098 if (WARN_ON(IS_ERR(rb_hammer))) {
5099 pr_cont("FAILED\n");
5100 ret = PTR_ERR(rb_hammer);
5101 goto out_free;
5102 }
5103
5104 ring_buffer_record_on(buffer);
 /*
  * Show the buffer is enabled before setting rb_test_started.
  * Yes there's a small race window where events could be
  * dropped and the thread won't catch it. But when a ring
  * buffer gets it, it will add them to its own counter.
  */
5114 smp_wmb();
5115 rb_test_started = true;
5116
 set_current_state(TASK_INTERRUPTIBLE);
 /* Just run for 10 seconds */
 schedule_timeout(10 * HZ);
5120
5121 kthread_stop(rb_hammer);
5122
5123 out_free:
5124 for_each_online_cpu(cpu) {
5125 if (!rb_threads[cpu])
5126 break;
5127 kthread_stop(rb_threads[cpu]);
5128 }
5129 if (ret) {
5130 ring_buffer_free(buffer);
5131 return ret;
5132 }
5133
 /* Report! */
5135 pr_info("finished\n");
5136 for_each_online_cpu(cpu) {
5137 struct ring_buffer_event *event;
5138 struct rb_test_data *data = &rb_data[cpu];
5139 struct rb_item *item;
5140 unsigned long total_events;
5141 unsigned long total_dropped;
5142 unsigned long total_written;
5143 unsigned long total_alloc;
5144 unsigned long total_read = 0;
5145 unsigned long total_size = 0;
5146 unsigned long total_len = 0;
5147 unsigned long total_lost = 0;
5148 unsigned long lost;
5149 int big_event_size;
5150 int small_event_size;
5151
5152 ret = -1;
5153
5154 total_events = data->events + data->events_nested;
5155 total_written = data->bytes_written + data->bytes_written_nested;
5156 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5157 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5158
5159 big_event_size = data->max_size + data->max_size_nested;
5160 small_event_size = data->min_size + data->min_size_nested;
5161
5162 pr_info("CPU %d:\n", cpu);
5163 pr_info(" events: %ld\n", total_events);
5164 pr_info(" dropped bytes: %ld\n", total_dropped);
5165 pr_info(" alloced bytes: %ld\n", total_alloc);
5166 pr_info(" written bytes: %ld\n", total_written);
5167 pr_info(" biggest event: %d\n", big_event_size);
5168 pr_info(" smallest event: %d\n", small_event_size);
5169
5170 if (RB_WARN_ON(buffer, total_dropped))
5171 break;
5172
5173 ret = 0;
5174
5175 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5176 total_lost += lost;
5177 item = ring_buffer_event_data(event);
5178 total_len += ring_buffer_event_length(event);
5179 total_size += item->size + sizeof(struct rb_item);
5180 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5181 pr_info("FAILED!\n");
5182 pr_info("buffer had: %.*s\n", item->size, item->str);
5183 pr_info("expected: %.*s\n", item->size, rb_string);
5184 RB_WARN_ON(buffer, 1);
5185 ret = -1;
5186 break;
5187 }
5188 total_read++;
5189 }
5190 if (ret)
5191 break;
5192
5193 ret = -1;
5194
5195 pr_info(" read events: %ld\n", total_read);
5196 pr_info(" lost events: %ld\n", total_lost);
5197 pr_info(" total events: %ld\n", total_lost + total_read);
5198 pr_info(" recorded len bytes: %ld\n", total_len);
5199 pr_info(" recorded size bytes: %ld\n", total_size);
5200 if (total_lost)
5201 pr_info(" With dropped events, record len and size may not match\n"
5202 " alloced and written from above\n");
5203 if (!total_lost) {
5204 if (RB_WARN_ON(buffer, total_len != total_alloc ||
5205 total_size != total_written))
5206 break;
5207 }
5208 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5209 break;
5210
5211 ret = 0;
5212 }
5213 if (!ret)
5214 pr_info("Ring buffer PASSED!\n");
5215
5216 ring_buffer_free(buffer);
5217 return 0;
5218}
5219
5220late_initcall(test_ringbuffer);
5221#endif
5222