/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
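
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */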
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);
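
	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */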

	if (!local_dec_and_test(&rb->nest))
		goto out;
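
	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */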
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;
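
	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */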
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}

static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
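	/*
	 * For inherited events we send all the output towards the parent.
	 */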
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}
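
		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */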
		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}
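
	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */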
	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
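
/*
 * Typical write-side usage (a sketch only; see perf_output_sample() in
 * kernel/events/core.c for the real thing):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;			// -ENOSPC, rb->lost was bumped
 *	perf_output_put(&handle, header);
 *	// ... further perf_output_{put,copy,skip}() for header.size bytes
 *	perf_output_end(&handle);
 */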

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
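
	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */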
	if (!rb->nr_pages)
		rb->paused = 1;
}
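
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */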
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;
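
	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */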
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;
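
	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close(). Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */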
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;
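
	/*
	 * Nesting is not supported for the AUX area, make sure nested
	 * writers are caught early.
	 */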
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
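
	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * so there is no space accounting: handle->size stays zero and
	 * the hardware may write over the entire buffer. Otherwise,
	 * compute the available space from the consumer's aux_tail.
	 */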
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
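
		/*
		 * handle->size computation depends on aux_tail load; this
		 * forms a control dependency barrier separating aux_tail
		 * load from aux data store below.
		 */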
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
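
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */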
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	bool wakeup = truncated;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;
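
	/* in overwrite mode, driver provides aux_head via handle */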
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
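		/*
		 * Only send RECORD_AUX if we have something useful to
		 * communicate to the userspace consumer.
		 */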
		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		wakeup = true;
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}

	if (wakeup) {
		if (truncated)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
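
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */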
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}
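
/*
 * Typical AUX-side usage by a pmu driver (a sketch, not lifted from any
 * particular driver):
 *
 *	// from pmu::start():
 *	buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;
 *	// point the hardware at buf, up to handle.size bytes
 *
 *	// from pmu::stop(), once the hardware is quiesced:
 *	perf_aux_output_end(&handle, bytes_written, truncated);
 */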

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
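		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * split it into order-0 pages and record the original
		 * order in the first page's ->private.
		 */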
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;
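
	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic contexts) and then does the actual freeing.
	 */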
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
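		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order().
		 */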
		max_order = ilog2(nr_pages);
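
		/*
		 * PMU requests more than one contiguous chunks of memory
		 * for SW double buffering.
		 */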
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}
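
	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */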
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;
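
	/*
	 * aux_refcount starts out as one reference held by the AUX mmap;
	 * writers take additional short-lived references in
	 * perf_aux_output_begin() and drop them through rb_free_aux().
	 */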
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); /* half the buffer */

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC
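
/*
 * Default implementation: the buffer is backed by individually
 * allocated order-0 pages.
 */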
static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;

	/* the '<=' counts in the user page */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	/*
	 * The data area is one contiguous vmalloc allocation; expose it as
	 * a single data "page" of order ilog2(nr_pages).
	 */
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}