/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

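/*
 * Notify the consumer side: mark the buffer pollable (POLLIN) and queue
 * the event's irq_work so the actual wakeup runs from a safe context,
 * since output can happen from NMI.
 */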
static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLLIN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

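/*
 * Drop the nesting count taken in perf_output_get_handle(). Only the
 * outer-most writer publishes ->data_head to user space and, if the
 * wakeup watermark was crossed while the handle was held, kicks the
 * consumer via perf_output_wakeup().
 */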
static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load
         * of ->data_tail and the stores of $data. In case ->data_tail
         * indicates there is no room in the buffer to store $data we do not.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for
         * C an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}

static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
{
        if (!backward)
                return CIRC_SPACE(head, tail, data_size) >= size;
        else
                return CIRC_SPACE(tail, head, data_size) >= size;
}

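/*
 * Common implementation behind perf_output_begin{,_forward,_backward}():
 * reserve @size bytes in the data area (moving ->head forward or backward
 * depending on the ring direction), emit a PERF_RECORD_LOST record first
 * if earlier records were dropped, and fill in @handle for the copy/skip
 * helpers. Returns -ENOSPC if a non-overwrite buffer has no room.
 */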
static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();

        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(rb->paused)) {
                if (rb->nr_pages)
                        local_inc(&rb->lost);
                goto out;
        }

        handle->rb    = rb;
        handle->event = event;

        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }

        perf_output_get_handle(handle);

        do {
                tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite) {
                        if (unlikely(!ring_buffer_has_space(head, tail,
                                                            perf_data_size(rb),
                                                            size, backward)))
                                goto fail;
                }

                /*
                 * The above forms a control dependency barrier separating
                 * the @tail load above from the data stores below, since
                 * the @tail load is required to compute the branch to the
                 * fail path.
                 *
                 * By this we guarantee that the data stores only happen
                 * when there is 'room' in the buffer; pairs with the full
                 * barrier on the consumer side (D).
                 */

                if (!backward)
                        head += size;
                else
                        head -= size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        if (backward) {
                offset = head;
                head = (u64)(-head);
        }

        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */

        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);

        page_shift = PAGE_SHIFT + page_order(rb);

        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;

        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

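/*
 * Public entry points: the _forward and _backward variants pick the ring
 * direction explicitly; plain perf_output_begin() follows the direction
 * the event was configured with (is_write_backward()).
 */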
int perf_output_begin_forward(struct perf_output_handle *handle,
                              struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
                               struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{

        return __perf_output_begin(handle, event, size,
                                   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}

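/*
 * Common ring_buffer setup: clamp the wakeup watermark to the buffer size
 * (defaulting to half the buffer), derive overwrite mode from the
 * RING_BUFFER_WRITABLE flag (a buffer that is not user-writable runs in
 * overwrite mode), and initialize the refcount and attached-event list.
 */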
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);

        /*
         * perf_output_begin() only checks rb->paused, therefore
         * rb->paused must be true if we have no pages for output.
         */
        if (!rb->nr_pages)
                rb->paused = 1;
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
        /*
         * OVERWRITE is determined by perf_aux_output_end() and can't be
         * passed in directly.
         */
        if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
                return;

        handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);


/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
{
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
        struct ring_buffer *rb;

        if (output_event->parent)
                output_event = output_event->parent;

        /*
         * Since this will typically be open across pmu::add/pmu::del, we
         * grab ring_buffer's refcount instead of holding rcu read lock
         * to make sure it doesn't disappear under us.
         */
        rb = ring_buffer_get(output_event);
        if (!rb)
                return NULL;

        if (!rb_has_aux(rb))
                goto err;

        /*
         * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
         * about to get freed, so we mustn't use it anymore.
         */
        if (!atomic_read(&rb->aux_mmap_count))
                goto err;

        if (!atomic_inc_not_zero(&rb->aux_refcount))
                goto err;

        /*
         * Nesting is not supported for the AUX area, make sure nested
         * writers are caught early.
         */
        if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
                goto err_put;

        aux_head = local_read(&rb->aux_head);

        handle->rb = rb;
        handle->event = event;
        handle->head = aux_head;
        handle->size = 0;
        handle->aux_flags = 0;

        /*
         * In overwrite mode, AUX data stores do not depend on aux_tail,
         * therefore the (A) control dependency barrier does not exist. The
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
                aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
                handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

                /*
                 * handle->size computation depends on the aux_tail load;
                 * this forms a control dependency barrier separating the
                 * load from the data store.
                 */
                if (!handle->size) {
                        event->pending_disable = 1;
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
                }
        }

        return handle->rb->aux_priv;

err_put:
        /* can't be last */
        rb_free_aux(rb);

err:
        ring_buffer_put(rb);
        handle->event = NULL;

        return NULL;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
        bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        /* in overwrite mode, driver provides aux_head via handle */
        if (rb->aux_overwrite) {
                handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

                aux_head = handle->head;
                local_set(&rb->aux_head, aux_head);
        } else {
                handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

                aux_head = local_read(&rb->aux_head);
                local_add(size, &rb->aux_head);
        }

        if (size || handle->aux_flags) {
                /*
                 * Only send RECORD_AUX if we have something useful to
                 * communicate.
                 */
                perf_event_aux_event(handle->event, aux_head, size,
                                     handle->aux_flags);
        }

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                wakeup = true;
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }

        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
                        handle->event->pending_disable = 1;
                perf_output_wakeup(handle);
        }

        handle->event = NULL;

        local_set(&rb->aux_nest, 0);

        rb_free_aux(rb);
        ring_buffer_put(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for
 * example, hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        if (size > handle->size)
                return -ENOSPC;

        local_add(size, &rb->aux_head);

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                perf_output_wakeup(handle);
                local_add(rb->aux_watermark, &rb->aux_wakeup);
                handle->wakeup = local_read(&rb->aux_wakeup) +
                                 rb->aux_watermark;
        }

        handle->head = aux_head;
        handle->size -= size;

        return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
        /* this is only valid between perf_aux_output_begin and *_end */
        if (!handle->event)
                return NULL;

        return handle->rb->aux_priv;
}

#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

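/*
 * Try to grab a physically contiguous chunk of 2^order pages for the AUX
 * area; on failure keep halving the order until a plain single page
 * succeeds. __GFP_NORETRY/__GFP_NOWARN keep the fallback path cheap and
 * quiet.
 */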
static struct page *rb_alloc_aux_page(int node, int order)
{
        struct page *page;

        if (order > MAX_ORDER)
                order = MAX_ORDER;

        do {
                page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
                /*
                 * Communicate the allocation size to the driver:
                 * if we managed to secure a high-order allocation,
                 * set its first page's private flag to this order;
                 * !PagePrivate(page) means it's just a normal page.
                 */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }

        return page;
}

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
        struct page *page = virt_to_page(rb->aux_pages[idx]);

        ClearPagePrivate(page);
        page->mapping = NULL;
        __free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
        int pg;

        /*
         * Should never happen, the last reference should be dropped from
         * perf_mmap_close() path, which first stops aux transactions (which
         * in turn are the atomic holders of aux_refcount) and then does the
         * last rb_free_aux().
         */
        WARN_ON_ONCE(in_atomic());

        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
                rb->aux_priv = NULL;
        }

        if (rb->aux_nr_pages) {
                for (pg = 0; pg < rb->aux_nr_pages; pg++)
                        rb_free_aux_page(rb, pg);

                kfree(rb->aux_pages);
                rb->aux_nr_pages = 0;
        }
}

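/*
 * Allocate the AUX area backing pages and hand them to the pmu driver's
 * setup_aux() callback. PMUs without scatter-gather support
 * (PERF_PMU_CAP_AUX_NO_SG) get as few, as large, physically contiguous
 * chunks as possible; with PERF_PMU_CAP_AUX_SW_DOUBLEBUF the maximum
 * chunk order is reduced by one so the buffer splits into at least two
 * pieces for software double-buffering.
 */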
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
        int ret = -ENOMEM, max_order = 0;

        if (!has_aux(event))
                return -EOPNOTSUPP;

        if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
                /*
                 * We need to start with the max_order that fits in nr_pages,
                 * not the other way around, hence ilog2() and not get_order.
                 */
                max_order = ilog2(nr_pages);

                /*
                 * PMU requests more than one contiguous chunk of memory
                 * for SW double buffering.
                 */
                if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
                    !overwrite) {
                        if (!max_order)
                                return -EINVAL;

                        max_order--;
                }
        }

        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;

        rb->free_aux = event->pmu->free_aux;
        for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
                int last, order;

                order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
                page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;

                for (last = rb->aux_nr_pages + (1 << page_private(page));
                     last > rb->aux_nr_pages; rb->aux_nr_pages++)
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }

        /*
         * In overwrite mode, PMUs that don't support SG may not handle more
         * than one contiguous allocation, since they rely on PMI to do
         * double buffering. In this case, the entire buffer has to be one
         * contiguous chunk.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
            overwrite) {
                struct page *page = virt_to_page(rb->aux_pages[0]);

                if (page_private(page) != max_order)
                        goto out;
        }

        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
                goto out;

        ret = 0;

        /*
         * aux_pages (and the pmu driver's private data, aux_priv) will be
         * referenced in both producer's and consumer's paths, hence the
         * refcount: it is taken in perf_aux_output_begin() and dropped in
         * perf_aux_output_end(), with the final reference released from
         * perf_mmap_close().
         */
        atomic_set(&rb->aux_refcount, 1);

        rb->aux_overwrite = overwrite;
        rb->aux_watermark = watermark;

        if (!rb->aux_watermark && !rb->aux_overwrite)
                rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
                __rb_free_aux(rb);

        return ret;
}

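/*
 * Drop one reference on the AUX area; the last one actually frees the
 * backing pages and the driver's private data via __rb_free_aux().
 */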
void rb_free_aux(struct ring_buffer *rb)
{
        if (atomic_dec_and_test(&rb->aux_refcount))
                __rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

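/*
 * Each buffer page (the user/control page plus nr_pages data pages) is a
 * separate order-0 allocation here, placed on the target CPU's node when
 * possible.
 */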
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* pgoff 0 is the user page; data pages occupy 1..data_page_nr() */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

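/*
 * Deferred free for the vmalloc()-backed buffer: clear page->mapping on
 * the user page and every data page (they all belong to the same vmalloc
 * area) before releasing it.
 */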
static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;

        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

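/*
 * vfree() must not be called from atomic/IRQ context, so the actual free
 * is punted to a workqueue.
 */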
void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

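/*
 * With CONFIG_PERF_USE_VMALLOC the whole buffer (user page + data) is one
 * vmalloc area; it is presented as a single "data page" of high order, so
 * nr_pages collapses to 1 and page_order() recovers the real size.
 */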
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        if (nr_pages) {
                rb->nr_pages = 1;
                rb->page_order = ilog2(nr_pages);
        }

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif

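/*
 * Translate an mmap page offset into the backing struct page, covering
 * the user page, the data pages and (if allocated) the AUX area, which
 * starts at rb->aux_pgoff.
 */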
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (rb->aux_nr_pages) {
                /* above AUX space */
                if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
                        return NULL;

                /* AUX space */
                if (pgoff >= rb->aux_pgoff)
                        return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
        }

        return __perf_mmap_to_page(rb, pgoff);
}