/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load
	 * of ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
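
/*
 * Worked example (illustrative, not from the original source): with a
 * data area of 8 bytes, head == 5 and tail == 2, CIRC_SPACE(5, 2, 8) == 4,
 * so a forward writer may claim at most 4 bytes here; CIRC_SPACE() always
 * keeps one byte unused so that head == tail unambiguously means "empty".
 */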

static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating
		 * the @tail load above from the data stores below. Since
		 * the @tail load is required to compute the branch to
		 * fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD
		 * issue after reading the data and before storing the new
		 * tail position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
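
/*
 * Typical producer-side usage, as a sketch (illustrative only; the real
 * callers live in kernel/events/core.c, and "header" here stands for a
 * caller-supplied struct perf_event_header):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;			// no space; accounted as lost
 *	perf_output_put(&handle, header);
 *	// ... perf_output_copy()/perf_output_skip() for the payload ...
 *	perf_output_end(&handle);
 */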

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't be
	 * passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);
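
/*
 * Illustrative use (a sketch, not taken from this file): a pmu driver
 * whose hardware stopped because the buffer filled up would report that
 * before committing the data:
 *
 *	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 *	perf_aux_output_end(handle, bytes_written);
 */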

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close(). Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this
		 * forms a control dependency barrier separating aux_tail
		 * load from aux data store below.
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);

static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
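
/*
 * Worked example (illustrative): with aux_watermark == 4096 and
 * aux_wakeup == 0, advancing aux_head to 9000 gives 9000 - 0 >= 4096,
 * so aux_wakeup becomes rounddown(9000, 4096) == 8192 and a wakeup is
 * due; a subsequent call with the same aux_head sees 9000 - 8192 < 4096
 * and stays quiet.
 */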

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be idle and hardware writing to AUX area can be
 * considered as writing to the ring buffer.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	if (size || handle->aux_flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to
		 * communicate.
		 */
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);
	}

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
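
/*
 * Illustrative pmu-driver flow, as a sketch (names such as "pt" and
 * "bytes_written" are hypothetical; see the intel_pt driver for a real
 * user):
 *
 *	// in pmu::start():
 *	buf = perf_aux_output_begin(&pt->handle, event);
 *	if (!buf)
 *		return;
 *	// program hardware to write at most pt->handle.size bytes into buf
 *
 *	// in pmu::stop() or the PMI handler:
 *	perf_aux_output_end(&pt->handle, bytes_written);
 */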

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private flag to PagePrivate and
		 * record the order in page_private(), so that
		 * rb_alloc_aux() can tell how many pages the chunk
		 * actually covers.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
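
/*
 * Worked example (illustrative): a request for order == 4 (16 pages)
 * that fails twice and then succeeds at order == 2 returns 4 contiguous
 * pages; split_page() turns them into individually refcounted pages and
 * page_private() of the first one records the chunk's order (2).
 */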

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (so
	 * it is not in_atomic()) and then does the rest of the teardown.
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order().
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunk of memory
		 * for SW double buffering.
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering for them. Bail out if the buffer isn't a single
	 * max_order-sized chunk in that case.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's paths, and we
	 * need to make sure they don't disappear while either one is
	 * still using them; the matching drop is in rb_free_aux().
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
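
/*
 * Worked example (illustrative): for a PERF_PMU_CAP_AUX_NO_SG pmu with
 * SW double buffering and !overwrite, nr_pages == 16 gives max_order ==
 * ilog2(16) - 1 == 3, so the loop above allocates two contiguous 8-page
 * chunks; without PERF_PMU_CAP_AUX_NO_SG, max_order stays 0 and the
 * buffer is built from 16 individual pages.
 */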

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}
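
/*
 * Worked example (illustrative): a request for 8 data pages under
 * CONFIG_PERF_USE_VMALLOC is recorded as rb->nr_pages == 1 with
 * rb->page_order == 3, so data_page_nr() still reports 8 actual pages.
 */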

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' (rather than '>=') counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
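
/*
 * Memory layout of the vmalloc'ed area, for illustration:
 *
 *	all_buf:  [ user_page ][ data: nr_pages pages, treated as one
 *		  virtually contiguous "page" of order ilog2(nr_pages) ]
 */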

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			/* clamp the index against speculative out-of-bounds access */
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff,
							   rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}
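
/*
 * Illustrative page-offset layout of a perf mmap (sketch):
 *
 *	pgoff 0			       : user control page
 *	pgoff 1 .. nr_pages	       : data pages
 *	pgoff aux_pgoff ..
 *	      aux_pgoff + aux_nr_pages : AUX pages, when allocated
 */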