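/*
 * Performance events core code.
 */
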
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>

#include "internal.h"

#include <asm/irq_regs.h>
struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}
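
/*
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls @func when the task is currently running on its cpu.  Returns
 * -ESRCH when the task is not running, -EAGAIN if it was scheduled out
 * before the IPI arrived (see remote_function()), otherwise the value
 * @func returned.
 */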
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH,
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
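
/*
 * cpu_function_call - call a function on a given cpu
 * @cpu:	target cpu to queue this function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls @func on @cpu via a synchronous IPI and returns the value
 * @func returned.
 */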
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO,
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events:		jump label, set while any event needs the
 *				sched-in/sched-out hooks
 * perf_cgroup_events:		per-cpu count of active cgroup events
 * perf_branch_stack_events:	per-cpu count of active branch-stack events
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;

static atomic_t perf_sample_allowed_ns __read_mostly =
	ATOMIC_INIT(DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	atomic_set(&perf_sample_allowed_ns, tmp);
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}
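
/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */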
#define NR_ACCUMULATED_SAMPLES 128
DEFINE_PER_CPU(u64, running_sample_length);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (atomic_read(&perf_sample_allowed_ns) == 0)
		return;

	/* decay the running length by one average sample, then add this one */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artificially low until we have seen
	 * NR_ACCUMULATED_SAMPLES samples.  Doing it this way keeps us from
	 * having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	printk_ratelimited(KERN_WARNING
			"perf samples too long (%lld > %d), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len,
			atomic_read(&perf_sample_allowed_ns),
			sysctl_perf_event_sample_rate);

	update_perf_cpu_limits();
}
273
274static atomic64_t perf_event_id;
275
276static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
277 enum event_type_t event_type);
278
279static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
280 enum event_type_t event_type,
281 struct task_struct *task);
282
283static void update_context_time(struct perf_event_context *ctx);
284static u64 perf_event_time(struct perf_event *event);
285
286void __weak perf_event_print_debug(void) { }
287
288extern __weak const char *perf_pmu_name(void)
289{
290 return "pmu";
291}
292
293static inline u64 perf_clock(void)
294{
295 return local_clock();
296}
297
298static inline struct perf_cpu_context *
299__get_cpu_context(struct perf_event_context *ctx)
300{
301 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
302}
303
304static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
305 struct perf_event_context *ctx)
306{
307 raw_spin_lock(&cpuctx->ctx.lock);
308 if (ctx)
309 raw_spin_lock(&ctx->lock);
310}
311
312static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
313 struct perf_event_context *ctx)
314{
315 if (ctx)
316 raw_spin_unlock(&ctx->lock);
317 raw_spin_unlock(&cpuctx->ctx.lock);
318}
319
320#ifdef CONFIG_CGROUP_PERF
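
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */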
326struct perf_cgroup_info {
327 u64 time;
328 u64 timestamp;
329};
330
331struct perf_cgroup {
332 struct cgroup_subsys_state css;
333 struct perf_cgroup_info __percpu *info;
334};
335
336
337
338
339
340
341static inline struct perf_cgroup *
342perf_cgroup_from_task(struct task_struct *task)
343{
344 return container_of(task_css(task, perf_subsys_id),
345 struct perf_cgroup, css);
346}
347
348static inline bool
349perf_cgroup_match(struct perf_event *event)
350{
351 struct perf_event_context *ctx = event->ctx;
352 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
353
354
355 if (!event->cgrp)
356 return true;
357
358
359 if (!cpuctx->cgrp)
360 return false;
361
362
363
364
365
366
367
368 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
369 event->cgrp->css.cgroup);
370}
371
372static inline bool perf_tryget_cgroup(struct perf_event *event)
373{
374 return css_tryget(&event->cgrp->css);
375}
376
377static inline void perf_put_cgroup(struct perf_event *event)
378{
379 css_put(&event->cgrp->css);
380}
381
382static inline void perf_detach_cgroup(struct perf_event *event)
383{
384 perf_put_cgroup(event);
385 event->cgrp = NULL;
386}
387
388static inline int is_cgroup_event(struct perf_event *event)
389{
390 return event->cgrp != NULL;
391}
392
393static inline u64 perf_cgroup_event_time(struct perf_event *event)
394{
395 struct perf_cgroup_info *t;
396
397 t = per_cpu_ptr(event->cgrp->info, event->cpu);
398 return t->time;
399}
400
401static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
402{
403 struct perf_cgroup_info *info;
404 u64 now;
405
406 now = perf_clock();
407
408 info = this_cpu_ptr(cgrp->info);
409
410 info->time += now - info->timestamp;
411 info->timestamp = now;
412}
413
414static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
415{
416 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
417 if (cgrp_out)
418 __update_cgrp_time(cgrp_out);
419}
420
421static inline void update_cgrp_time_from_event(struct perf_event *event)
422{
423 struct perf_cgroup *cgrp;
424
425
426
427
428
429 if (!is_cgroup_event(event))
430 return;
431
432 cgrp = perf_cgroup_from_task(current);
433
434
435
436 if (cgrp == event->cgrp)
437 __update_cgrp_time(event->cgrp);
438}
439
440static inline void
441perf_cgroup_set_timestamp(struct task_struct *task,
442 struct perf_event_context *ctx)
443{
444 struct perf_cgroup *cgrp;
445 struct perf_cgroup_info *info;
446
447
448
449
450
451
452 if (!task || !ctx->nr_cgroups)
453 return;
454
455 cgrp = perf_cgroup_from_task(task);
456 info = this_cpu_ptr(cgrp->info);
457 info->timestamp = ctx->timestamp;
458}
459
460#define PERF_CGROUP_SWOUT 0x1
461#define PERF_CGROUP_SWIN 0x2
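
/*
 * Reschedule events based on the cgroup constraint of the task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN  : schedule in based on the cgroup of the incoming task
 */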
469void perf_cgroup_switch(struct task_struct *task, int mode)
470{
471 struct perf_cpu_context *cpuctx;
472 struct pmu *pmu;
473 unsigned long flags;
474
475
476
477
478
479
480 local_irq_save(flags);
481
482
483
484
485
486 rcu_read_lock();
487
488 list_for_each_entry_rcu(pmu, &pmus, entry) {
489 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
490 if (cpuctx->unique_pmu != pmu)
491 continue;
492
493
494
495
496
497
498
499
500 if (cpuctx->ctx.nr_cgroups > 0) {
501 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
502 perf_pmu_disable(cpuctx->ctx.pmu);
503
504 if (mode & PERF_CGROUP_SWOUT) {
505 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
506
507
508
509
510 cpuctx->cgrp = NULL;
511 }
512
513 if (mode & PERF_CGROUP_SWIN) {
514 WARN_ON_ONCE(cpuctx->cgrp);
515
516
517
518
519
520 cpuctx->cgrp = perf_cgroup_from_task(task);
521 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
522 }
523 perf_pmu_enable(cpuctx->ctx.pmu);
524 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
525 }
526 }
527
528 rcu_read_unlock();
529
530 local_irq_restore(flags);
531}
532
533static inline void perf_cgroup_sched_out(struct task_struct *task,
534 struct task_struct *next)
535{
536 struct perf_cgroup *cgrp1;
537 struct perf_cgroup *cgrp2 = NULL;
538
539
540
541
542 cgrp1 = perf_cgroup_from_task(task);
543
544
545
546
547
548 if (next)
549 cgrp2 = perf_cgroup_from_task(next);
550
551
552
553
554
555
556 if (cgrp1 != cgrp2)
557 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
558}
559
560static inline void perf_cgroup_sched_in(struct task_struct *prev,
561 struct task_struct *task)
562{
563 struct perf_cgroup *cgrp1;
564 struct perf_cgroup *cgrp2 = NULL;
565
566
567
568
569 cgrp1 = perf_cgroup_from_task(task);
570
571
572 cgrp2 = perf_cgroup_from_task(prev);
573
574
575
576
577
578
579 if (cgrp1 != cgrp2)
580 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
581}
582
583static inline int perf_cgroup_connect(int fd, struct perf_event *event,
584 struct perf_event_attr *attr,
585 struct perf_event *group_leader)
586{
587 struct perf_cgroup *cgrp;
588 struct cgroup_subsys_state *css;
589 struct fd f = fdget(fd);
590 int ret = 0;
591
592 if (!f.file)
593 return -EBADF;
594
595 rcu_read_lock();
596
597 css = css_from_dir(f.file->f_dentry, &perf_subsys);
598 if (IS_ERR(css)) {
599 ret = PTR_ERR(css);
600 goto out;
601 }
602
603 cgrp = container_of(css, struct perf_cgroup, css);
604 event->cgrp = cgrp;
605
606
607 if (!perf_tryget_cgroup(event)) {
608 event->cgrp = NULL;
609 ret = -ENOENT;
610 goto out;
611 }
612
613
614
615
616
617
618 if (group_leader && group_leader->cgrp != cgrp) {
619 perf_detach_cgroup(event);
620 ret = -EINVAL;
621 }
622out:
623 rcu_read_unlock();
624 fdput(f);
625 return ret;
626}
627
628static inline void
629perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
630{
631 struct perf_cgroup_info *t;
632 t = per_cpu_ptr(event->cgrp->info, event->cpu);
633 event->shadow_ctx_time = now - t->timestamp;
634}
635
636static inline void
637perf_cgroup_defer_enabled(struct perf_event *event)
638{
639
640
641
642
643
644
645 if (is_cgroup_event(event) && !perf_cgroup_match(event))
646 event->cgrp_defer_enabled = 1;
647}
648
649static inline void
650perf_cgroup_mark_enabled(struct perf_event *event,
651 struct perf_event_context *ctx)
652{
653 struct perf_event *sub;
654 u64 tstamp = perf_event_time(event);
655
656 if (!event->cgrp_defer_enabled)
657 return;
658
659 event->cgrp_defer_enabled = 0;
660
661 event->tstamp_enabled = tstamp - event->total_time_enabled;
662 list_for_each_entry(sub, &event->sibling_list, group_entry) {
663 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
664 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
665 sub->cgrp_defer_enabled = 0;
666 }
667 }
668}
669#else
670
671static inline bool
672perf_cgroup_match(struct perf_event *event)
673{
674 return true;
675}
676
677static inline void perf_detach_cgroup(struct perf_event *event)
678{}
679
680static inline int is_cgroup_event(struct perf_event *event)
681{
682 return 0;
683}
684
685static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
686{
687 return 0;
688}
689
690static inline void update_cgrp_time_from_event(struct perf_event *event)
691{
692}
693
694static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
695{
696}
697
698static inline void perf_cgroup_sched_out(struct task_struct *task,
699 struct task_struct *next)
700{
701}
702
703static inline void perf_cgroup_sched_in(struct task_struct *prev,
704 struct task_struct *task)
705{
706}
707
708static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
709 struct perf_event_attr *attr,
710 struct perf_event *group_leader)
711{
712 return -EINVAL;
713}
714
715static inline void
716perf_cgroup_set_timestamp(struct task_struct *task,
717 struct perf_event_context *ctx)
718{
719}
720
721void
722perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
723{
724}
725
726static inline void
727perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
728{
729}
730
731static inline u64 perf_cgroup_event_time(struct perf_event *event)
732{
733 return 0;
734}
735
736static inline void
737perf_cgroup_defer_enabled(struct perf_event *event)
738{
739}
740
741static inline void
742perf_cgroup_mark_enabled(struct perf_event *event,
743 struct perf_event_context *ctx)
744{
745}
746#endif
747
748
749
750
751
752#define PERF_CPU_HRTIMER (1000 / HZ)
753
754
755
756static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
757{
758 struct perf_cpu_context *cpuctx;
759 enum hrtimer_restart ret = HRTIMER_NORESTART;
760 int rotations = 0;
761
762 WARN_ON(!irqs_disabled());
763
764 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
765
766 rotations = perf_rotate_context(cpuctx);
767
768
769
770
771 if (rotations) {
772 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
773 ret = HRTIMER_RESTART;
774 }
775
776 return ret;
777}
778
779
780void perf_cpu_hrtimer_cancel(int cpu)
781{
782 struct perf_cpu_context *cpuctx;
783 struct pmu *pmu;
784 unsigned long flags;
785
786 if (WARN_ON(cpu != smp_processor_id()))
787 return;
788
789 local_irq_save(flags);
790
791 rcu_read_lock();
792
793 list_for_each_entry_rcu(pmu, &pmus, entry) {
794 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
795
796 if (pmu->task_ctx_nr == perf_sw_context)
797 continue;
798
799 hrtimer_cancel(&cpuctx->hrtimer);
800 }
801
802 rcu_read_unlock();
803
804 local_irq_restore(flags);
805}
806
807static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
808{
809 struct hrtimer *hr = &cpuctx->hrtimer;
810 struct pmu *pmu = cpuctx->ctx.pmu;
811 int timer;
812
813
814 if (pmu->task_ctx_nr == perf_sw_context)
815 return;
816
817
818
819
820
821 timer = pmu->hrtimer_interval_ms;
822 if (timer < 1)
823 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
824
825 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
826
827 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
828 hr->function = perf_cpu_hrtimer_handler;
829}
830
831static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
832{
833 struct hrtimer *hr = &cpuctx->hrtimer;
834 struct pmu *pmu = cpuctx->ctx.pmu;
835
836
837 if (pmu->task_ctx_nr == perf_sw_context)
838 return;
839
840 if (hrtimer_active(hr))
841 return;
842
843 if (!hrtimer_callback_running(hr))
844 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
845 0, HRTIMER_MODE_REL_PINNED, 0);
846}
847
848void perf_pmu_disable(struct pmu *pmu)
849{
850 int *count = this_cpu_ptr(pmu->pmu_disable_count);
851 if (!(*count)++)
852 pmu->pmu_disable(pmu);
853}
854
855void perf_pmu_enable(struct pmu *pmu)
856{
857 int *count = this_cpu_ptr(pmu->pmu_disable_count);
858 if (!--(*count))
859 pmu->pmu_enable(pmu);
860}
861
862static DEFINE_PER_CPU(struct list_head, rotation_list);
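
/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully
 * serialized: both are strictly cpu affine, rotate_start is called
 * with IRQs disabled, and rotation runs from IRQ context.
 */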
869static void perf_pmu_rotate_start(struct pmu *pmu)
870{
871 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
872 struct list_head *head = &__get_cpu_var(rotation_list);
873
874 WARN_ON(!irqs_disabled());
875
876 if (list_empty(&cpuctx->rotation_list))
877 list_add(&cpuctx->rotation_list, head);
878}
879
880static void get_ctx(struct perf_event_context *ctx)
881{
882 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
883}
884
885static void put_ctx(struct perf_event_context *ctx)
886{
887 if (atomic_dec_and_test(&ctx->refcount)) {
888 if (ctx->parent_ctx)
889 put_ctx(ctx->parent_ctx);
890 if (ctx->task)
891 put_task_struct(ctx->task);
892 kfree_rcu(ctx, rcu_head);
893 }
894}
895
896static void unclone_ctx(struct perf_event_context *ctx)
897{
898 if (ctx->parent_ctx) {
899 put_ctx(ctx->parent_ctx);
900 ctx->parent_ctx = NULL;
901 }
902}
903
904static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
905{
906
907
908
909 if (event->parent)
910 event = event->parent;
911
912 return task_tgid_nr_ns(p, event->ns);
913}
914
915static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
916{
917
918
919
920 if (event->parent)
921 event = event->parent;
922
923 return task_pid_nr_ns(p, event->ns);
924}
925
926
927
928
929
930static u64 primary_event_id(struct perf_event *event)
931{
932 u64 id = event->id;
933
934 if (event->parent)
935 id = event->parent->id;
936
937 return id;
938}
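
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */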
945static struct perf_event_context *
946perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
947{
948 struct perf_event_context *ctx;
949
950retry:
951
952
953
954
955
956
957
958
959
960 preempt_disable();
961 rcu_read_lock();
962 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
963 if (ctx) {
964
965
966
967
968
969
970
971
972
973
974 raw_spin_lock_irqsave(&ctx->lock, *flags);
975 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
976 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
977 rcu_read_unlock();
978 preempt_enable();
979 goto retry;
980 }
981
982 if (!atomic_inc_not_zero(&ctx->refcount)) {
983 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
984 ctx = NULL;
985 }
986 }
987 rcu_read_unlock();
988 preempt_enable();
989 return ctx;
990}
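
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */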
997static struct perf_event_context *
998perf_pin_task_context(struct task_struct *task, int ctxn)
999{
1000 struct perf_event_context *ctx;
1001 unsigned long flags;
1002
1003 ctx = perf_lock_task_context(task, ctxn, &flags);
1004 if (ctx) {
1005 ++ctx->pin_count;
1006 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1007 }
1008 return ctx;
1009}
1010
1011static void perf_unpin_context(struct perf_event_context *ctx)
1012{
1013 unsigned long flags;
1014
1015 raw_spin_lock_irqsave(&ctx->lock, flags);
1016 --ctx->pin_count;
1017 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1018}
1019
1020
1021
1022
1023static void update_context_time(struct perf_event_context *ctx)
1024{
1025 u64 now = perf_clock();
1026
1027 ctx->time += now - ctx->timestamp;
1028 ctx->timestamp = now;
1029}
1030
1031static u64 perf_event_time(struct perf_event *event)
1032{
1033 struct perf_event_context *ctx = event->ctx;
1034
1035 if (is_cgroup_event(event))
1036 return perf_cgroup_event_time(event);
1037
1038 return ctx ? ctx->time : 0;
1039}
1040
1041
1042
1043
1044
1045static void update_event_times(struct perf_event *event)
1046{
1047 struct perf_event_context *ctx = event->ctx;
1048 u64 run_end;
1049
1050 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1051 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1052 return;
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063 if (is_cgroup_event(event))
1064 run_end = perf_cgroup_event_time(event);
1065 else if (ctx->is_active)
1066 run_end = ctx->time;
1067 else
1068 run_end = event->tstamp_stopped;
1069
1070 event->total_time_enabled = run_end - event->tstamp_enabled;
1071
1072 if (event->state == PERF_EVENT_STATE_INACTIVE)
1073 run_end = event->tstamp_stopped;
1074 else
1075 run_end = perf_event_time(event);
1076
1077 event->total_time_running = run_end - event->tstamp_running;
1078
1079}
1080
1081
1082
1083
1084static void update_group_times(struct perf_event *leader)
1085{
1086 struct perf_event *event;
1087
1088 update_event_times(leader);
1089 list_for_each_entry(event, &leader->sibling_list, group_entry)
1090 update_event_times(event);
1091}
1092
1093static struct list_head *
1094ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1095{
1096 if (event->attr.pinned)
1097 return &ctx->pinned_groups;
1098 else
1099 return &ctx->flexible_groups;
1100}
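
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */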
1106static void
1107list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1108{
1109 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1110 event->attach_state |= PERF_ATTACH_CONTEXT;
1111
1112
1113
1114
1115
1116
1117 if (event->group_leader == event) {
1118 struct list_head *list;
1119
1120 if (is_software_event(event))
1121 event->group_flags |= PERF_GROUP_SOFTWARE;
1122
1123 list = ctx_group_list(event, ctx);
1124 list_add_tail(&event->group_entry, list);
1125 }
1126
1127 if (is_cgroup_event(event))
1128 ctx->nr_cgroups++;
1129
1130 if (has_branch_stack(event))
1131 ctx->nr_branch_stack++;
1132
1133 list_add_rcu(&event->event_entry, &ctx->event_list);
1134 if (!ctx->nr_events)
1135 perf_pmu_rotate_start(ctx->pmu);
1136 ctx->nr_events++;
1137 if (event->attr.inherit_stat)
1138 ctx->nr_stat++;
1139}
1140
1141
1142
1143
1144static inline void perf_event__state_init(struct perf_event *event)
1145{
1146 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1147 PERF_EVENT_STATE_INACTIVE;
1148}
1149
1150
1151
1152
1153
1154static void perf_event__read_size(struct perf_event *event)
1155{
1156 int entry = sizeof(u64);
1157 int size = 0;
1158 int nr = 1;
1159
1160 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1161 size += sizeof(u64);
1162
1163 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1164 size += sizeof(u64);
1165
1166 if (event->attr.read_format & PERF_FORMAT_ID)
1167 entry += sizeof(u64);
1168
1169 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1170 nr += event->group_leader->nr_siblings;
1171 size += sizeof(u64);
1172 }
1173
1174 size += entry * nr;
1175 event->read_size = size;
1176}
1177
1178static void perf_event__header_size(struct perf_event *event)
1179{
1180 struct perf_sample_data *data;
1181 u64 sample_type = event->attr.sample_type;
1182 u16 size = 0;
1183
1184 perf_event__read_size(event);
1185
1186 if (sample_type & PERF_SAMPLE_IP)
1187 size += sizeof(data->ip);
1188
1189 if (sample_type & PERF_SAMPLE_ADDR)
1190 size += sizeof(data->addr);
1191
1192 if (sample_type & PERF_SAMPLE_PERIOD)
1193 size += sizeof(data->period);
1194
1195 if (sample_type & PERF_SAMPLE_WEIGHT)
1196 size += sizeof(data->weight);
1197
1198 if (sample_type & PERF_SAMPLE_READ)
1199 size += event->read_size;
1200
1201 if (sample_type & PERF_SAMPLE_DATA_SRC)
1202 size += sizeof(data->data_src.val);
1203
1204 event->header_size = size;
1205}
1206
1207static void perf_event__id_header_size(struct perf_event *event)
1208{
1209 struct perf_sample_data *data;
1210 u64 sample_type = event->attr.sample_type;
1211 u16 size = 0;
1212
1213 if (sample_type & PERF_SAMPLE_TID)
1214 size += sizeof(data->tid_entry);
1215
1216 if (sample_type & PERF_SAMPLE_TIME)
1217 size += sizeof(data->time);
1218
1219 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1220 size += sizeof(data->id);
1221
1222 if (sample_type & PERF_SAMPLE_ID)
1223 size += sizeof(data->id);
1224
1225 if (sample_type & PERF_SAMPLE_STREAM_ID)
1226 size += sizeof(data->stream_id);
1227
1228 if (sample_type & PERF_SAMPLE_CPU)
1229 size += sizeof(data->cpu_entry);
1230
1231 event->id_header_size = size;
1232}
1233
1234static void perf_group_attach(struct perf_event *event)
1235{
1236 struct perf_event *group_leader = event->group_leader, *pos;
1237
1238
1239
1240
1241 if (event->attach_state & PERF_ATTACH_GROUP)
1242 return;
1243
1244 event->attach_state |= PERF_ATTACH_GROUP;
1245
1246 if (group_leader == event)
1247 return;
1248
1249 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1250 !is_software_event(event))
1251 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1252
1253 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1254 group_leader->nr_siblings++;
1255
1256 perf_event__header_size(group_leader);
1257
1258 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1259 perf_event__header_size(pos);
1260}
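
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */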
1266static void
1267list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1268{
1269 struct perf_cpu_context *cpuctx;
1270
1271
1272
1273 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1274 return;
1275
1276 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1277
1278 if (is_cgroup_event(event)) {
1279 ctx->nr_cgroups--;
1280 cpuctx = __get_cpu_context(ctx);
1281
1282
1283
1284
1285
1286 if (!ctx->nr_cgroups)
1287 cpuctx->cgrp = NULL;
1288 }
1289
1290 if (has_branch_stack(event))
1291 ctx->nr_branch_stack--;
1292
1293 ctx->nr_events--;
1294 if (event->attr.inherit_stat)
1295 ctx->nr_stat--;
1296
1297 list_del_rcu(&event->event_entry);
1298
1299 if (event->group_leader == event)
1300 list_del_init(&event->group_entry);
1301
1302 update_group_times(event);
1303
1304
1305
1306
1307
1308
1309
1310
1311 if (event->state > PERF_EVENT_STATE_OFF)
1312 event->state = PERF_EVENT_STATE_OFF;
1313}
1314
1315static void perf_group_detach(struct perf_event *event)
1316{
1317 struct perf_event *sibling, *tmp;
1318 struct list_head *list = NULL;
1319
1320
1321
1322
1323 if (!(event->attach_state & PERF_ATTACH_GROUP))
1324 return;
1325
1326 event->attach_state &= ~PERF_ATTACH_GROUP;
1327
1328
1329
1330
1331 if (event->group_leader != event) {
1332 list_del_init(&event->group_entry);
1333 event->group_leader->nr_siblings--;
1334 goto out;
1335 }
1336
1337 if (!list_empty(&event->group_entry))
1338 list = &event->group_entry;
1339
1340
1341
1342
1343
1344
1345 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1346 if (list)
1347 list_move_tail(&sibling->group_entry, list);
1348 sibling->group_leader = sibling;
1349
1350
1351 sibling->group_flags = event->group_flags;
1352 }
1353
1354out:
1355 perf_event__header_size(event->group_leader);
1356
1357 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1358 perf_event__header_size(tmp);
1359}
1360
1361static inline int
1362event_filter_match(struct perf_event *event)
1363{
1364 return (event->cpu == -1 || event->cpu == smp_processor_id())
1365 && perf_cgroup_match(event);
1366}
1367
1368static void
1369event_sched_out(struct perf_event *event,
1370 struct perf_cpu_context *cpuctx,
1371 struct perf_event_context *ctx)
1372{
1373 u64 tstamp = perf_event_time(event);
1374 u64 delta;
1375
1376
1377
1378
1379
1380
1381 if (event->state == PERF_EVENT_STATE_INACTIVE
1382 && !event_filter_match(event)) {
1383 delta = tstamp - event->tstamp_stopped;
1384 event->tstamp_running += delta;
1385 event->tstamp_stopped = tstamp;
1386 }
1387
1388 if (event->state != PERF_EVENT_STATE_ACTIVE)
1389 return;
1390
1391 event->state = PERF_EVENT_STATE_INACTIVE;
1392 if (event->pending_disable) {
1393 event->pending_disable = 0;
1394 event->state = PERF_EVENT_STATE_OFF;
1395 }
1396 event->tstamp_stopped = tstamp;
1397 event->pmu->del(event, 0);
1398 event->oncpu = -1;
1399
1400 if (!is_software_event(event))
1401 cpuctx->active_oncpu--;
1402 ctx->nr_active--;
1403 if (event->attr.freq && event->attr.sample_freq)
1404 ctx->nr_freq--;
1405 if (event->attr.exclusive || !cpuctx->active_oncpu)
1406 cpuctx->exclusive = 0;
1407}
1408
1409static void
1410group_sched_out(struct perf_event *group_event,
1411 struct perf_cpu_context *cpuctx,
1412 struct perf_event_context *ctx)
1413{
1414 struct perf_event *event;
1415 int state = group_event->state;
1416
1417 event_sched_out(group_event, cpuctx, ctx);
1418
1419
1420
1421
1422 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1423 event_sched_out(event, cpuctx, ctx);
1424
1425 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1426 cpuctx->exclusive = 0;
1427}
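
/*
 * Cross CPU call to remove a performance event.
 *
 * We disable the event on the hardware level first, then remove it
 * from the context list.
 */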
1435static int __perf_remove_from_context(void *info)
1436{
1437 struct perf_event *event = info;
1438 struct perf_event_context *ctx = event->ctx;
1439 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1440
1441 raw_spin_lock(&ctx->lock);
1442 event_sched_out(event, cpuctx, ctx);
1443 list_del_event(event, ctx);
1444 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1445 ctx->is_active = 0;
1446 cpuctx->task_ctx = NULL;
1447 }
1448 raw_spin_unlock(&ctx->lock);
1449
1450 return 0;
1451}
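
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call.  For task events we only
 * issue the call while the task is running; otherwise the event is
 * removed from the inactive context directly under ctx->lock.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.
 */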
1467static void perf_remove_from_context(struct perf_event *event)
1468{
1469 struct perf_event_context *ctx = event->ctx;
1470 struct task_struct *task = ctx->task;
1471
1472 lockdep_assert_held(&ctx->mutex);
1473
1474 if (!task) {
1475
1476
1477
1478
1479 cpu_function_call(event->cpu, __perf_remove_from_context, event);
1480 return;
1481 }
1482
1483retry:
1484 if (!task_function_call(task, __perf_remove_from_context, event))
1485 return;
1486
1487 raw_spin_lock_irq(&ctx->lock);
1488
1489
1490
1491
1492 if (ctx->is_active) {
1493 raw_spin_unlock_irq(&ctx->lock);
1494 goto retry;
1495 }
1496
1497
1498
1499
1500
1501 list_del_event(event, ctx);
1502 raw_spin_unlock_irq(&ctx->lock);
1503}
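
/*
 * Cross CPU call to disable a performance event.
 */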
1508int __perf_event_disable(void *info)
1509{
1510 struct perf_event *event = info;
1511 struct perf_event_context *ctx = event->ctx;
1512 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1513
1514
1515
1516
1517
1518
1519
1520
1521 if (ctx->task && cpuctx->task_ctx != ctx)
1522 return -EINVAL;
1523
1524 raw_spin_lock(&ctx->lock);
1525
1526
1527
1528
1529
1530 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1531 update_context_time(ctx);
1532 update_cgrp_time_from_event(event);
1533 update_group_times(event);
1534 if (event == event->group_leader)
1535 group_sched_out(event, cpuctx, ctx);
1536 else
1537 event_sched_out(event, cpuctx, ctx);
1538 event->state = PERF_EVENT_STATE_OFF;
1539 }
1540
1541 raw_spin_unlock(&ctx->lock);
1542
1543 return 0;
1544}
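
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.
 */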
1559void perf_event_disable(struct perf_event *event)
1560{
1561 struct perf_event_context *ctx = event->ctx;
1562 struct task_struct *task = ctx->task;
1563
1564 if (!task) {
1565
1566
1567
1568 cpu_function_call(event->cpu, __perf_event_disable, event);
1569 return;
1570 }
1571
1572retry:
1573 if (!task_function_call(task, __perf_event_disable, event))
1574 return;
1575
1576 raw_spin_lock_irq(&ctx->lock);
1577
1578
1579
1580 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1581 raw_spin_unlock_irq(&ctx->lock);
1582
1583
1584
1585
1586 task = ctx->task;
1587 goto retry;
1588 }
1589
1590
1591
1592
1593
1594 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1595 update_group_times(event);
1596 event->state = PERF_EVENT_STATE_OFF;
1597 }
1598 raw_spin_unlock_irq(&ctx->lock);
1599}
1600EXPORT_SYMBOL_GPL(perf_event_disable);
1601
1602static void perf_set_shadow_time(struct perf_event *event,
1603 struct perf_event_context *ctx,
1604 u64 tstamp)
1605{
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631 if (is_cgroup_event(event))
1632 perf_cgroup_set_shadow_time(event, tstamp);
1633 else
1634 event->shadow_ctx_time = tstamp - ctx->timestamp;
1635}
1636
1637#define MAX_INTERRUPTS (~0ULL)
1638
1639static void perf_log_throttle(struct perf_event *event, int enable);
1640
1641static int
1642event_sched_in(struct perf_event *event,
1643 struct perf_cpu_context *cpuctx,
1644 struct perf_event_context *ctx)
1645{
1646 u64 tstamp = perf_event_time(event);
1647
1648 if (event->state <= PERF_EVENT_STATE_OFF)
1649 return 0;
1650
1651 event->state = PERF_EVENT_STATE_ACTIVE;
1652 event->oncpu = smp_processor_id();
1653
1654
1655
1656
1657
1658
1659 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1660 perf_log_throttle(event, 1);
1661 event->hw.interrupts = 0;
1662 }
1663
1664
1665
1666
1667 smp_wmb();
1668
1669 if (event->pmu->add(event, PERF_EF_START)) {
1670 event->state = PERF_EVENT_STATE_INACTIVE;
1671 event->oncpu = -1;
1672 return -EAGAIN;
1673 }
1674
1675 event->tstamp_running += tstamp - event->tstamp_stopped;
1676
1677 perf_set_shadow_time(event, ctx, tstamp);
1678
1679 if (!is_software_event(event))
1680 cpuctx->active_oncpu++;
1681 ctx->nr_active++;
1682 if (event->attr.freq && event->attr.sample_freq)
1683 ctx->nr_freq++;
1684
1685 if (event->attr.exclusive)
1686 cpuctx->exclusive = 1;
1687
1688 return 0;
1689}
1690
1691static int
1692group_sched_in(struct perf_event *group_event,
1693 struct perf_cpu_context *cpuctx,
1694 struct perf_event_context *ctx)
1695{
1696 struct perf_event *event, *partial_group = NULL;
1697 struct pmu *pmu = group_event->pmu;
1698 u64 now = ctx->time;
1699 bool simulate = false;
1700
1701 if (group_event->state == PERF_EVENT_STATE_OFF)
1702 return 0;
1703
1704 pmu->start_txn(pmu);
1705
1706 if (event_sched_in(group_event, cpuctx, ctx)) {
1707 pmu->cancel_txn(pmu);
1708 perf_cpu_hrtimer_restart(cpuctx);
1709 return -EAGAIN;
1710 }
1711
1712
1713
1714
1715 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1716 if (event_sched_in(event, cpuctx, ctx)) {
1717 partial_group = event;
1718 goto group_error;
1719 }
1720 }
1721
1722 if (!pmu->commit_txn(pmu))
1723 return 0;
1724
1725group_error:
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1741 if (event == partial_group)
1742 simulate = true;
1743
1744 if (simulate) {
1745 event->tstamp_running += now - event->tstamp_stopped;
1746 event->tstamp_stopped = now;
1747 } else {
1748 event_sched_out(event, cpuctx, ctx);
1749 }
1750 }
1751 event_sched_out(group_event, cpuctx, ctx);
1752
1753 pmu->cancel_txn(pmu);
1754
1755 perf_cpu_hrtimer_restart(cpuctx);
1756
1757 return -EAGAIN;
1758}
1759
1760
1761
1762
1763static int group_can_go_on(struct perf_event *event,
1764 struct perf_cpu_context *cpuctx,
1765 int can_add_hw)
1766{
1767
1768
1769
1770 if (event->group_flags & PERF_GROUP_SOFTWARE)
1771 return 1;
1772
1773
1774
1775
1776 if (cpuctx->exclusive)
1777 return 0;
1778
1779
1780
1781
1782 if (event->attr.exclusive && cpuctx->active_oncpu)
1783 return 0;
1784
1785
1786
1787
1788 return can_add_hw;
1789}
1790
1791static void add_event_to_ctx(struct perf_event *event,
1792 struct perf_event_context *ctx)
1793{
1794 u64 tstamp = perf_event_time(event);
1795
1796 list_add_event(event, ctx);
1797 perf_group_attach(event);
1798 event->tstamp_enabled = tstamp;
1799 event->tstamp_running = tstamp;
1800 event->tstamp_stopped = tstamp;
1801}
1802
1803static void task_ctx_sched_out(struct perf_event_context *ctx);
1804static void
1805ctx_sched_in(struct perf_event_context *ctx,
1806 struct perf_cpu_context *cpuctx,
1807 enum event_type_t event_type,
1808 struct task_struct *task);
1809
1810static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1811 struct perf_event_context *ctx,
1812 struct task_struct *task)
1813{
1814 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1815 if (ctx)
1816 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1817 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1818 if (ctx)
1819 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1820}
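
/*
 * Cross CPU call to install and enable a performance event.
 *
 * Must be called with ctx->mutex held.
 */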
1827static int __perf_install_in_context(void *info)
1828{
1829 struct perf_event *event = info;
1830 struct perf_event_context *ctx = event->ctx;
1831 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1832 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1833 struct task_struct *task = current;
1834
1835 perf_ctx_lock(cpuctx, task_ctx);
1836 perf_pmu_disable(cpuctx->ctx.pmu);
1837
1838
1839
1840
1841 if (task_ctx)
1842 task_ctx_sched_out(task_ctx);
1843
1844
1845
1846
1847
1848 if (ctx->task && task_ctx != ctx) {
1849 if (task_ctx)
1850 raw_spin_unlock(&task_ctx->lock);
1851 raw_spin_lock(&ctx->lock);
1852 task_ctx = ctx;
1853 }
1854
1855 if (task_ctx) {
1856 cpuctx->task_ctx = task_ctx;
1857 task = task_ctx->task;
1858 }
1859
1860 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1861
1862 update_context_time(ctx);
1863
1864
1865
1866
1867
1868 update_cgrp_time_from_event(event);
1869
1870 add_event_to_ctx(event, ctx);
1871
1872
1873
1874
1875 perf_event_sched_in(cpuctx, task_ctx, task);
1876
1877 perf_pmu_enable(cpuctx->ctx.pmu);
1878 perf_ctx_unlock(cpuctx, task_ctx);
1879
1880 return 0;
1881}
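
/*
 * Attach a performance event to a context.
 *
 * CPU events are installed with a cross-cpu call.  For task events we
 * only issue the call while the task is running; otherwise the event
 * is added to the inactive context directly under ctx->lock.
 */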
1893static void
1894perf_install_in_context(struct perf_event_context *ctx,
1895 struct perf_event *event,
1896 int cpu)
1897{
1898 struct task_struct *task = ctx->task;
1899
1900 lockdep_assert_held(&ctx->mutex);
1901
1902 event->ctx = ctx;
1903 if (event->cpu != -1)
1904 event->cpu = cpu;
1905
1906 if (!task) {
1907
1908
1909
1910
1911 cpu_function_call(cpu, __perf_install_in_context, event);
1912 return;
1913 }
1914
1915retry:
1916 if (!task_function_call(task, __perf_install_in_context, event))
1917 return;
1918
1919 raw_spin_lock_irq(&ctx->lock);
1920
1921
1922
1923
1924 if (ctx->is_active) {
1925 raw_spin_unlock_irq(&ctx->lock);
1926 goto retry;
1927 }
1928
1929
1930
1931
1932
1933 add_event_to_ctx(event, ctx);
1934 raw_spin_unlock_irq(&ctx->lock);
1935}
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945static void __perf_event_mark_enabled(struct perf_event *event)
1946{
1947 struct perf_event *sub;
1948 u64 tstamp = perf_event_time(event);
1949
1950 event->state = PERF_EVENT_STATE_INACTIVE;
1951 event->tstamp_enabled = tstamp - event->total_time_enabled;
1952 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1953 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1954 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1955 }
1956}
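
/*
 * Cross CPU call to enable a performance event.
 */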
1961static int __perf_event_enable(void *info)
1962{
1963 struct perf_event *event = info;
1964 struct perf_event_context *ctx = event->ctx;
1965 struct perf_event *leader = event->group_leader;
1966 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1967 int err;
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978 if (!ctx->is_active)
1979 return -EINVAL;
1980
1981 raw_spin_lock(&ctx->lock);
1982 update_context_time(ctx);
1983
1984 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1985 goto unlock;
1986
1987
1988
1989
1990 perf_cgroup_set_timestamp(current, ctx);
1991
1992 __perf_event_mark_enabled(event);
1993
1994 if (!event_filter_match(event)) {
1995 if (is_cgroup_event(event))
1996 perf_cgroup_defer_enabled(event);
1997 goto unlock;
1998 }
1999
2000
2001
2002
2003
2004 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
2005 goto unlock;
2006
2007 if (!group_can_go_on(event, cpuctx, 1)) {
2008 err = -EEXIST;
2009 } else {
2010 if (event == leader)
2011 err = group_sched_in(event, cpuctx, ctx);
2012 else
2013 err = event_sched_in(event, cpuctx, ctx);
2014 }
2015
2016 if (err) {
2017
2018
2019
2020
2021 if (leader != event) {
2022 group_sched_out(leader, cpuctx, ctx);
2023 perf_cpu_hrtimer_restart(cpuctx);
2024 }
2025 if (leader->attr.pinned) {
2026 update_group_times(leader);
2027 leader->state = PERF_EVENT_STATE_ERROR;
2028 }
2029 }
2030
2031unlock:
2032 raw_spin_unlock(&ctx->lock);
2033
2034 return 0;
2035}
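
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.
 */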
2046void perf_event_enable(struct perf_event *event)
2047{
2048 struct perf_event_context *ctx = event->ctx;
2049 struct task_struct *task = ctx->task;
2050
2051 if (!task) {
2052
2053
2054
2055 cpu_function_call(event->cpu, __perf_event_enable, event);
2056 return;
2057 }
2058
2059 raw_spin_lock_irq(&ctx->lock);
2060 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2061 goto out;
2062
2063
2064
2065
2066
2067
2068
2069
2070 if (event->state == PERF_EVENT_STATE_ERROR)
2071 event->state = PERF_EVENT_STATE_OFF;
2072
2073retry:
2074 if (!ctx->is_active) {
2075 __perf_event_mark_enabled(event);
2076 goto out;
2077 }
2078
2079 raw_spin_unlock_irq(&ctx->lock);
2080
2081 if (!task_function_call(task, __perf_event_enable, event))
2082 return;
2083
2084 raw_spin_lock_irq(&ctx->lock);
2085
2086
2087
2088
2089
2090 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2091
2092
2093
2094
2095 task = ctx->task;
2096 goto retry;
2097 }
2098
2099out:
2100 raw_spin_unlock_irq(&ctx->lock);
2101}
2102EXPORT_SYMBOL_GPL(perf_event_enable);
2103
2104int perf_event_refresh(struct perf_event *event, int refresh)
2105{
2106
2107
2108
2109 if (event->attr.inherit || !is_sampling_event(event))
2110 return -EINVAL;
2111
2112 atomic_add(refresh, &event->event_limit);
2113 perf_event_enable(event);
2114
2115 return 0;
2116}
2117EXPORT_SYMBOL_GPL(perf_event_refresh);
2118
2119static void ctx_sched_out(struct perf_event_context *ctx,
2120 struct perf_cpu_context *cpuctx,
2121 enum event_type_t event_type)
2122{
2123 struct perf_event *event;
2124 int is_active = ctx->is_active;
2125
2126 ctx->is_active &= ~event_type;
2127 if (likely(!ctx->nr_events))
2128 return;
2129
2130 update_context_time(ctx);
2131 update_cgrp_time_from_cpuctx(cpuctx);
2132 if (!ctx->nr_active)
2133 return;
2134
2135 perf_pmu_disable(ctx->pmu);
2136 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
2137 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2138 group_sched_out(event, cpuctx, ctx);
2139 }
2140
2141 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
2142 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2143 group_sched_out(event, cpuctx, ctx);
2144 }
2145 perf_pmu_enable(ctx->pmu);
2146}
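
/*
 * Test whether two contexts are equivalent, i.e. whether they have both
 * been cloned from the same version of the same context.
 */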
2159static int context_equiv(struct perf_event_context *ctx1,
2160 struct perf_event_context *ctx2)
2161{
2162 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
2163 && ctx1->parent_gen == ctx2->parent_gen
2164 && !ctx1->pin_count && !ctx2->pin_count;
2165}
2166
2167static void __perf_event_sync_stat(struct perf_event *event,
2168 struct perf_event *next_event)
2169{
2170 u64 value;
2171
2172 if (!event->attr.inherit_stat)
2173 return;
2174
2175
2176
2177
2178
2179
2180
2181
2182 switch (event->state) {
2183 case PERF_EVENT_STATE_ACTIVE:
2184 event->pmu->read(event);
2185
2186
2187 case PERF_EVENT_STATE_INACTIVE:
2188 update_event_times(event);
2189 break;
2190
2191 default:
2192 break;
2193 }
2194
2195
2196
2197
2198
2199 value = local64_read(&next_event->count);
2200 value = local64_xchg(&event->count, value);
2201 local64_set(&next_event->count, value);
2202
2203 swap(event->total_time_enabled, next_event->total_time_enabled);
2204 swap(event->total_time_running, next_event->total_time_running);
2205
2206
2207
2208
2209 perf_event_update_userpage(event);
2210 perf_event_update_userpage(next_event);
2211}
2212
2213#define list_next_entry(pos, member) \
2214 list_entry(pos->member.next, typeof(*pos), member)
2215
2216static void perf_event_sync_stat(struct perf_event_context *ctx,
2217 struct perf_event_context *next_ctx)
2218{
2219 struct perf_event *event, *next_event;
2220
2221 if (!ctx->nr_stat)
2222 return;
2223
2224 update_context_time(ctx);
2225
2226 event = list_first_entry(&ctx->event_list,
2227 struct perf_event, event_entry);
2228
2229 next_event = list_first_entry(&next_ctx->event_list,
2230 struct perf_event, event_entry);
2231
2232 while (&event->event_entry != &ctx->event_list &&
2233 &next_event->event_entry != &next_ctx->event_list) {
2234
2235 __perf_event_sync_stat(event, next_event);
2236
2237 event = list_next_entry(event, event_entry);
2238 next_event = list_next_entry(next_event, event_entry);
2239 }
2240}
2241
2242static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2243 struct task_struct *next)
2244{
2245 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2246 struct perf_event_context *next_ctx;
2247 struct perf_event_context *parent;
2248 struct perf_cpu_context *cpuctx;
2249 int do_switch = 1;
2250
2251 if (likely(!ctx))
2252 return;
2253
2254 cpuctx = __get_cpu_context(ctx);
2255 if (!cpuctx->task_ctx)
2256 return;
2257
2258 rcu_read_lock();
2259 parent = rcu_dereference(ctx->parent_ctx);
2260 next_ctx = next->perf_event_ctxp[ctxn];
2261 if (parent && next_ctx &&
2262 rcu_dereference(next_ctx->parent_ctx) == parent) {
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272 raw_spin_lock(&ctx->lock);
2273 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2274 if (context_equiv(ctx, next_ctx)) {
2275
2276
2277
2278
2279 task->perf_event_ctxp[ctxn] = next_ctx;
2280 next->perf_event_ctxp[ctxn] = ctx;
2281 ctx->task = next;
2282 next_ctx->task = task;
2283 do_switch = 0;
2284
2285 perf_event_sync_stat(ctx, next_ctx);
2286 }
2287 raw_spin_unlock(&next_ctx->lock);
2288 raw_spin_unlock(&ctx->lock);
2289 }
2290 rcu_read_unlock();
2291
2292 if (do_switch) {
2293 raw_spin_lock(&ctx->lock);
2294 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2295 cpuctx->task_ctx = NULL;
2296 raw_spin_unlock(&ctx->lock);
2297 }
2298}
2299
2300#define for_each_task_context_nr(ctxn) \
2301 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
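
/*
 * Called from the scheduler when a task is scheduled out: schedule out
 * each per-task context and, when cgroup events are in use on this cpu,
 * switch cgroup events over to the incoming task.
 */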
2314void __perf_event_task_sched_out(struct task_struct *task,
2315 struct task_struct *next)
2316{
2317 int ctxn;
2318
2319 for_each_task_context_nr(ctxn)
2320 perf_event_context_sched_out(task, ctxn, next);
2321
2322
2323
2324
2325
2326
2327 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2328 perf_cgroup_sched_out(task, next);
2329}
2330
2331static void task_ctx_sched_out(struct perf_event_context *ctx)
2332{
2333 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2334
2335 if (!cpuctx->task_ctx)
2336 return;
2337
2338 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2339 return;
2340
2341 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2342 cpuctx->task_ctx = NULL;
2343}
2344
2345
2346
2347
2348static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2349 enum event_type_t event_type)
2350{
2351 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2352}
2353
2354static void
2355ctx_pinned_sched_in(struct perf_event_context *ctx,
2356 struct perf_cpu_context *cpuctx)
2357{
2358 struct perf_event *event;
2359
2360 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2361 if (event->state <= PERF_EVENT_STATE_OFF)
2362 continue;
2363 if (!event_filter_match(event))
2364 continue;
2365
2366
2367 if (is_cgroup_event(event))
2368 perf_cgroup_mark_enabled(event, ctx);
2369
2370 if (group_can_go_on(event, cpuctx, 1))
2371 group_sched_in(event, cpuctx, ctx);
2372
2373
2374
2375
2376
2377 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2378 update_group_times(event);
2379 event->state = PERF_EVENT_STATE_ERROR;
2380 }
2381 }
2382}
2383
2384static void
2385ctx_flexible_sched_in(struct perf_event_context *ctx,
2386 struct perf_cpu_context *cpuctx)
2387{
2388 struct perf_event *event;
2389 int can_add_hw = 1;
2390
2391 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2392
2393 if (event->state <= PERF_EVENT_STATE_OFF)
2394 continue;
2395
2396
2397
2398
2399 if (!event_filter_match(event))
2400 continue;
2401
2402
2403 if (is_cgroup_event(event))
2404 perf_cgroup_mark_enabled(event, ctx);
2405
2406 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2407 if (group_sched_in(event, cpuctx, ctx))
2408 can_add_hw = 0;
2409 }
2410 }
2411}
2412
2413static void
2414ctx_sched_in(struct perf_event_context *ctx,
2415 struct perf_cpu_context *cpuctx,
2416 enum event_type_t event_type,
2417 struct task_struct *task)
2418{
2419 u64 now;
2420 int is_active = ctx->is_active;
2421
2422 ctx->is_active |= event_type;
2423 if (likely(!ctx->nr_events))
2424 return;
2425
2426 now = perf_clock();
2427 ctx->timestamp = now;
2428 perf_cgroup_set_timestamp(task, ctx);
2429
2430
2431
2432
2433 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2434 ctx_pinned_sched_in(ctx, cpuctx);
2435
2436
2437 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2438 ctx_flexible_sched_in(ctx, cpuctx);
2439}
2440
2441static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2442 enum event_type_t event_type,
2443 struct task_struct *task)
2444{
2445 struct perf_event_context *ctx = &cpuctx->ctx;
2446
2447 ctx_sched_in(ctx, cpuctx, event_type, task);
2448}
2449
2450static void perf_event_context_sched_in(struct perf_event_context *ctx,
2451 struct task_struct *task)
2452{
2453 struct perf_cpu_context *cpuctx;
2454
2455 cpuctx = __get_cpu_context(ctx);
2456 if (cpuctx->task_ctx == ctx)
2457 return;
2458
2459 perf_ctx_lock(cpuctx, ctx);
2460 perf_pmu_disable(ctx->pmu);
2461
2462
2463
2464
2465
2466 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2467
2468 if (ctx->nr_events)
2469 cpuctx->task_ctx = ctx;
2470
2471 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2472
2473 perf_pmu_enable(ctx->pmu);
2474 perf_ctx_unlock(cpuctx, ctx);
2475
2476
2477
2478
2479
2480 perf_pmu_rotate_start(ctx->pmu);
2481}
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499static void perf_branch_stack_sched_in(struct task_struct *prev,
2500 struct task_struct *task)
2501{
2502 struct perf_cpu_context *cpuctx;
2503 struct pmu *pmu;
2504 unsigned long flags;
2505
2506
2507 if (prev == task)
2508 return;
2509
2510 local_irq_save(flags);
2511
2512 rcu_read_lock();
2513
2514 list_for_each_entry_rcu(pmu, &pmus, entry) {
2515 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2516
2517
2518
2519
2520
2521 if (cpuctx->ctx.nr_branch_stack > 0
2522 && pmu->flush_branch_stack) {
2523
2524 pmu = cpuctx->ctx.pmu;
2525
2526 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2527
2528 perf_pmu_disable(pmu);
2529
2530 pmu->flush_branch_stack();
2531
2532 perf_pmu_enable(pmu);
2533
2534 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2535 }
2536 }
2537
2538 rcu_read_unlock();
2539
2540 local_irq_restore(flags);
2541}
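
/*
 * Called from the scheduler when a task is scheduled in: schedule in
 * each per-task context, then cgroup events and, when branch stack
 * events are in use, flush the branch stack.
 */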
2554void __perf_event_task_sched_in(struct task_struct *prev,
2555 struct task_struct *task)
2556{
2557 struct perf_event_context *ctx;
2558 int ctxn;
2559
2560 for_each_task_context_nr(ctxn) {
2561 ctx = task->perf_event_ctxp[ctxn];
2562 if (likely(!ctx))
2563 continue;
2564
2565 perf_event_context_sched_in(ctx, task);
2566 }
2567
2568
2569
2570
2571
2572 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2573 perf_cgroup_sched_in(prev, task);
2574
2575
2576 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2577 perf_branch_stack_sched_in(prev, task);
2578}
2579
2580static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2581{
2582 u64 frequency = event->attr.sample_freq;
2583 u64 sec = NSEC_PER_SEC;
2584 u64 divisor, dividend;
2585
2586 int count_fls, nsec_fls, frequency_fls, sec_fls;
2587
2588 count_fls = fls64(count);
2589 nsec_fls = fls64(nsec);
2590 frequency_fls = fls64(frequency);
2591 sec_fls = 30;
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607#define REDUCE_FLS(a, b) \
2608do { \
2609 if (a##_fls > b##_fls) { \
2610 a >>= 1; \
2611 a##_fls--; \
2612 } else { \
2613 b >>= 1; \
2614 b##_fls--; \
2615 } \
2616} while (0)
2617
2618
2619
2620
2621
2622 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2623 REDUCE_FLS(nsec, frequency);
2624 REDUCE_FLS(sec, count);
2625 }
2626
2627 if (count_fls + sec_fls > 64) {
2628 divisor = nsec * frequency;
2629
2630 while (count_fls + sec_fls > 64) {
2631 REDUCE_FLS(count, sec);
2632 divisor >>= 1;
2633 }
2634
2635 dividend = count * sec;
2636 } else {
2637 dividend = count * sec;
2638
2639 while (nsec_fls + frequency_fls > 64) {
2640 REDUCE_FLS(nsec, frequency);
2641 dividend >>= 1;
2642 }
2643
2644 divisor = nsec * frequency;
2645 }
2646
2647 if (!divisor)
2648 return dividend;
2649
2650 return div64_u64(dividend, divisor);
2651}
2652
2653static DEFINE_PER_CPU(int, perf_throttled_count);
2654static DEFINE_PER_CPU(u64, perf_throttled_seq);
2655
2656static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2657{
2658 struct hw_perf_event *hwc = &event->hw;
2659 s64 period, sample_period;
2660 s64 delta;
2661
2662 period = perf_calculate_period(event, nsec, count);
2663
2664 delta = (s64)(period - hwc->sample_period);
2665 delta = (delta + 7) / 8;
2666
2667 sample_period = hwc->sample_period + delta;
2668
2669 if (!sample_period)
2670 sample_period = 1;
2671
2672 hwc->sample_period = sample_period;
2673
2674 if (local64_read(&hwc->period_left) > 8*sample_period) {
2675 if (disable)
2676 event->pmu->stop(event, PERF_EF_UPDATE);
2677
2678 local64_set(&hwc->period_left, 0);
2679
2680 if (disable)
2681 event->pmu->start(event, PERF_EF_RELOAD);
2682 }
2683}
2684
2685
2686
2687
2688
2689
2690static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2691 int needs_unthr)
2692{
2693 struct perf_event *event;
2694 struct hw_perf_event *hwc;
2695 u64 now, period = TICK_NSEC;
2696 s64 delta;
2697
2698
2699
2700
2701
2702
2703 if (!(ctx->nr_freq || needs_unthr))
2704 return;
2705
2706 raw_spin_lock(&ctx->lock);
2707 perf_pmu_disable(ctx->pmu);
2708
2709 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2710 if (event->state != PERF_EVENT_STATE_ACTIVE)
2711 continue;
2712
2713 if (!event_filter_match(event))
2714 continue;
2715
2716 hwc = &event->hw;
2717
2718 if (hwc->interrupts == MAX_INTERRUPTS) {
2719 hwc->interrupts = 0;
2720 perf_log_throttle(event, 1);
2721 event->pmu->start(event, 0);
2722 }
2723
2724 if (!event->attr.freq || !event->attr.sample_freq)
2725 continue;
2726
2727
2728
2729
2730 event->pmu->stop(event, PERF_EF_UPDATE);
2731
2732 now = local64_read(&event->count);
2733 delta = now - hwc->freq_count_stamp;
2734 hwc->freq_count_stamp = now;
2735
2736
2737
2738
2739
2740
2741
2742
2743 if (delta > 0)
2744 perf_adjust_period(event, period, delta, false);
2745
2746 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2747 }
2748
2749 perf_pmu_enable(ctx->pmu);
2750 raw_spin_unlock(&ctx->lock);
2751}
2752
2753
2754
2755
2756static void rotate_ctx(struct perf_event_context *ctx)
2757{
2758
2759
2760
2761
2762 if (!ctx->rotate_disable)
2763 list_rotate_left(&ctx->flexible_groups);
2764}
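
/*
 * Rotate the flexible groups in the cpu and task contexts so that all
 * flexible events get a chance at the PMU; returns whether a rotation
 * was performed.
 */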
2771static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2772{
2773 struct perf_event_context *ctx = NULL;
2774 int rotate = 0, remove = 1;
2775
2776 if (cpuctx->ctx.nr_events) {
2777 remove = 0;
2778 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2779 rotate = 1;
2780 }
2781
2782 ctx = cpuctx->task_ctx;
2783 if (ctx && ctx->nr_events) {
2784 remove = 0;
2785 if (ctx->nr_events != ctx->nr_active)
2786 rotate = 1;
2787 }
2788
2789 if (!rotate)
2790 goto done;
2791
2792 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2793 perf_pmu_disable(cpuctx->ctx.pmu);
2794
2795 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2796 if (ctx)
2797 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2798
2799 rotate_ctx(&cpuctx->ctx);
2800 if (ctx)
2801 rotate_ctx(ctx);
2802
2803 perf_event_sched_in(cpuctx, ctx, current);
2804
2805 perf_pmu_enable(cpuctx->ctx.pmu);
2806 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2807done:
2808 if (remove)
2809 list_del_init(&cpuctx->rotation_list);
2810
2811 return rotate;
2812}
2813
2814#ifdef CONFIG_NO_HZ_FULL
2815bool perf_event_can_stop_tick(void)
2816{
2817 if (atomic_read(&nr_freq_events) ||
2818 __this_cpu_read(perf_throttled_count))
2819 return false;
2820 else
2821 return true;
2822}
2823#endif
2824
2825void perf_event_task_tick(void)
2826{
2827 struct list_head *head = &__get_cpu_var(rotation_list);
2828 struct perf_cpu_context *cpuctx, *tmp;
2829 struct perf_event_context *ctx;
2830 int throttled;
2831
2832 WARN_ON(!irqs_disabled());
2833
2834 __this_cpu_inc(perf_throttled_seq);
2835 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2836
2837 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2838 ctx = &cpuctx->ctx;
2839 perf_adjust_freq_unthr_context(ctx, throttled);
2840
2841 ctx = cpuctx->task_ctx;
2842 if (ctx)
2843 perf_adjust_freq_unthr_context(ctx, throttled);
2844 }
2845}
2846
2847static int event_enable_on_exec(struct perf_event *event,
2848 struct perf_event_context *ctx)
2849{
2850 if (!event->attr.enable_on_exec)
2851 return 0;
2852
2853 event->attr.enable_on_exec = 0;
2854 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2855 return 0;
2856
2857 __perf_event_mark_enabled(event);
2858
2859 return 1;
2860}
2861
2862/*
2863 * Enable all of a task's events that have been marked enable-on-exec.
2864 * This expects task == current.
2865 */
2866static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2867{
2868 struct perf_event *event;
2869 unsigned long flags;
2870 int enabled = 0;
2871 int ret;
2872
2873 local_irq_save(flags);
2874 if (!ctx || !ctx->nr_events)
2875 goto out;
2876
2877 /*
2878 * Schedule cgroup events out now; otherwise the sched-in at the end of
2879 * this function would try to schedule in cgroup events that are
2880 * already scheduled in.
2881 */
2882
2883
2884 perf_cgroup_sched_out(current, NULL);
2885
2886 raw_spin_lock(&ctx->lock);
2887 task_ctx_sched_out(ctx);
2888
2889 list_for_each_entry(event, &ctx->event_list, event_entry) {
2890 ret = event_enable_on_exec(event, ctx);
2891 if (ret)
2892 enabled = 1;
2893 }
2894
2895 /*
2896 * Unclone this context if we enabled any event.
2897 */
2898 if (enabled)
2899 unclone_ctx(ctx);
2900
2901 raw_spin_unlock(&ctx->lock);
2902
2903 /*
2904 * Schedule the context (including any cgroup events) back in.
2905 */
2906 perf_event_context_sched_in(ctx, ctx->task);
2907out:
2908 local_irq_restore(flags);
2909}
2910
2911/*
2912 * Cross CPU call to read the hardware event.
2913 */
2914static void __perf_event_read(void *info)
2915{
2916 struct perf_event *event = info;
2917 struct perf_event_context *ctx = event->ctx;
2918 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2919
2920 /*
2921 * If this is a task context, check that it is still the current task
2922 * context of this CPU.  If not, the task has been scheduled out before
2923 * the smp call arrived; in that case event->count was already updated
2924 * with a recent value when the event was scheduled out.
2925 */
2926
2927 if (ctx->task && cpuctx->task_ctx != ctx)
2928 return;
2929
2930 raw_spin_lock(&ctx->lock);
2931 if (ctx->is_active) {
2932 update_context_time(ctx);
2933 update_cgrp_time_from_event(event);
2934 }
2935 update_event_times(event);
2936 if (event->state == PERF_EVENT_STATE_ACTIVE)
2937 event->pmu->read(event);
2938 raw_spin_unlock(&ctx->lock);
2939}
2940
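/*
 * The total value of an event is its own count plus the counts
 * accumulated from all of its exited children.
 */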
2941static inline u64 perf_event_count(struct perf_event *event)
2942{
2943 return local64_read(&event->count) + atomic64_read(&event->child_count);
2944}
2945
2946static u64 perf_event_read(struct perf_event *event)
2947{
2948 /*
2949 * If the event is enabled and currently active on a CPU, read the
2950 * up-to-date value from that CPU:
2951 */
2952 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2953 smp_call_function_single(event->oncpu,
2954 __perf_event_read, event, 1);
2955 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2956 struct perf_event_context *ctx = event->ctx;
2957 unsigned long flags;
2958
2959 raw_spin_lock_irqsave(&ctx->lock, flags);
2960 /*
2961 * The read may happen while the context is not active (for example
2962 * when the owning task is blocked); in that case we cannot update the
2963 * context time.
2964 */
2965 if (ctx->is_active) {
2966 update_context_time(ctx);
2967 update_cgrp_time_from_event(event);
2968 }
2969 update_event_times(event);
2970 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2971 }
2972
2973 return perf_event_count(event);
2974}
2975
2976/*
2977 * Initialize an event context.
2978 */
2979static void __perf_event_init_context(struct perf_event_context *ctx)
2980{
2981 raw_spin_lock_init(&ctx->lock);
2982 mutex_init(&ctx->mutex);
2983 INIT_LIST_HEAD(&ctx->pinned_groups);
2984 INIT_LIST_HEAD(&ctx->flexible_groups);
2985 INIT_LIST_HEAD(&ctx->event_list);
2986 atomic_set(&ctx->refcount, 1);
2987}
2988
2989static struct perf_event_context *
2990alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2991{
2992 struct perf_event_context *ctx;
2993
2994 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2995 if (!ctx)
2996 return NULL;
2997
2998 __perf_event_init_context(ctx);
2999 if (task) {
3000 ctx->task = task;
3001 get_task_struct(task);
3002 }
3003 ctx->pmu = pmu;
3004
3005 return ctx;
3006}
3007
3008static struct task_struct *
3009find_lively_task_by_vpid(pid_t vpid)
3010{
3011 struct task_struct *task;
3012 int err;
3013
3014 rcu_read_lock();
3015 if (!vpid)
3016 task = current;
3017 else
3018 task = find_task_by_vpid(vpid);
3019 if (task)
3020 get_task_struct(task);
3021 rcu_read_unlock();
3022
3023 if (!task)
3024 return ERR_PTR(-ESRCH);
3025
3026 /* Reuse ptrace permission checks for now. */
3027 err = -EACCES;
3028 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3029 goto errout;
3030
3031 return task;
3032errout:
3033 put_task_struct(task);
3034 return ERR_PTR(err);
3035
3036}
3037
3038/*
3039 * Returns a matching context with its refcount and pin count raised.
3040 */
3041static struct perf_event_context *
3042find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
3043{
3044 struct perf_event_context *ctx;
3045 struct perf_cpu_context *cpuctx;
3046 unsigned long flags;
3047 int ctxn, err;
3048
3049 if (!task) {
3050 /* CPU-wide events need CAP_SYS_ADMIN when perf_event_paranoid says so: */
3051 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3052 return ERR_PTR(-EACCES);
3053
3054 /*
3055 * We could allow attaching an event to an offline CPU and rebinding it
3056 * once the CPU comes back up, but for now simply refuse offline CPUs.
3057 */
3058
3059 if (!cpu_online(cpu))
3060 return ERR_PTR(-ENODEV);
3061
3062 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3063 ctx = &cpuctx->ctx;
3064 get_ctx(ctx);
3065 ++ctx->pin_count;
3066
3067 return ctx;
3068 }
3069
3070 err = -EINVAL;
3071 ctxn = pmu->task_ctx_nr;
3072 if (ctxn < 0)
3073 goto errout;
3074
3075retry:
3076 ctx = perf_lock_task_context(task, ctxn, &flags);
3077 if (ctx) {
3078 unclone_ctx(ctx);
3079 ++ctx->pin_count;
3080 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3081 } else {
3082 ctx = alloc_perf_context(pmu, task);
3083 err = -ENOMEM;
3084 if (!ctx)
3085 goto errout;
3086
3087 err = 0;
3088 mutex_lock(&task->perf_event_mutex);
3089 /*
3090 * If the task has already passed perf_event_exit_task() we must see
3091 * PF_EXITING here; do not install a new context on an exiting task.
3092 */
3093 if (task->flags & PF_EXITING)
3094 err = -ESRCH;
3095 else if (task->perf_event_ctxp[ctxn])
3096 err = -EAGAIN;
3097 else {
3098 get_ctx(ctx);
3099 ++ctx->pin_count;
3100 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3101 }
3102 mutex_unlock(&task->perf_event_mutex);
3103
3104 if (unlikely(err)) {
3105 put_ctx(ctx);
3106
3107 if (err == -EAGAIN)
3108 goto retry;
3109 goto errout;
3110 }
3111 }
3112
3113 return ctx;
3114
3115errout:
3116 return ERR_PTR(err);
3117}
3118
3119static void perf_event_free_filter(struct perf_event *event);
3120
3121static void free_event_rcu(struct rcu_head *head)
3122{
3123 struct perf_event *event;
3124
3125 event = container_of(head, struct perf_event, rcu_head);
3126 if (event->ns)
3127 put_pid_ns(event->ns);
3128 perf_event_free_filter(event);
3129 kfree(event);
3130}
3131
3132static void ring_buffer_put(struct ring_buffer *rb);
3133static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
3134
3135static void unaccount_event_cpu(struct perf_event *event, int cpu)
3136{
3137 if (event->parent)
3138 return;
3139
3140 if (has_branch_stack(event)) {
3141 if (!(event->attach_state & PERF_ATTACH_TASK))
3142 atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
3143 }
3144 if (is_cgroup_event(event))
3145 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3146}
3147
3148static void unaccount_event(struct perf_event *event)
3149{
3150 if (event->parent)
3151 return;
3152
3153 if (event->attach_state & PERF_ATTACH_TASK)
3154 static_key_slow_dec_deferred(&perf_sched_events);
3155 if (event->attr.mmap || event->attr.mmap_data)
3156 atomic_dec(&nr_mmap_events);
3157 if (event->attr.comm)
3158 atomic_dec(&nr_comm_events);
3159 if (event->attr.task)
3160 atomic_dec(&nr_task_events);
3161 if (event->attr.freq)
3162 atomic_dec(&nr_freq_events);
3163 if (is_cgroup_event(event))
3164 static_key_slow_dec_deferred(&perf_sched_events);
3165 if (has_branch_stack(event))
3166 static_key_slow_dec_deferred(&perf_sched_events);
3167
3168 unaccount_event_cpu(event, event->cpu);
3169}
3170
3171static void __free_event(struct perf_event *event)
3172{
3173 if (!event->parent) {
3174 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3175 put_callchain_buffers();
3176 }
3177
3178 if (event->destroy)
3179 event->destroy(event);
3180
3181 if (event->ctx)
3182 put_ctx(event->ctx);
3183
3184 call_rcu(&event->rcu_head, free_event_rcu);
3185}
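
/*
 * Release an event: sync any pending irq work, drop the accounting and
 * static keys, detach the ring buffer and cgroup, then free via RCU.
 */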
3186static void free_event(struct perf_event *event)
3187{
3188 irq_work_sync(&event->pending);
3189
3190 unaccount_event(event);
3191
3192 if (event->rb) {
3193 struct ring_buffer *rb;
3194
3195 /*
3196 * Drop the remaining ring-buffer reference under mmap_mutex and detach
3197 * the event from the buffer's wakeup list; this serializes against a
3198 * concurrent mmap()/munmap() of the same buffer.
3199 */
3200
3201 mutex_lock(&event->mmap_mutex);
3202 rb = event->rb;
3203 if (rb) {
3204 rcu_assign_pointer(event->rb, NULL);
3205 ring_buffer_detach(event, rb);
3206 ring_buffer_put(rb);
3207 }
3208 mutex_unlock(&event->mmap_mutex);
3209 }
3210
3211 if (is_cgroup_event(event))
3212 perf_detach_cgroup(event);
3213
3214
3215 __free_event(event);
3216}
3217
3218int perf_event_release_kernel(struct perf_event *event)
3219{
3220 struct perf_event_context *ctx = event->ctx;
3221
3222 WARN_ON_ONCE(ctx->parent_ctx);
3223
3224 /*
3225 * Detach the event from its group and remove it from the context so it
3226 * can no longer be scheduled.  ctx->mutex is taken with
3227 * SINGLE_DEPTH_NESTING because the same lock class can legitimately be
3228 * held recursively when parent and child contexts are torn down
3229 * together, and lockdep would otherwise complain.
3230 */
3231
3232
3233
3234
3235 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3236 raw_spin_lock_irq(&ctx->lock);
3237 perf_group_detach(event);
3238 raw_spin_unlock_irq(&ctx->lock);
3239 perf_remove_from_context(event);
3240 mutex_unlock(&ctx->mutex);
3241
3242 free_event(event);
3243
3244 return 0;
3245}
3246EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3247
3248/*
3249 * Drop a reference to the event; the final reference tears it down.
3250 */
3251static void put_event(struct perf_event *event)
3252{
3253 struct task_struct *owner;
3254
3255 if (!atomic_long_dec_and_test(&event->refcount))
3256 return;
3257
3258 rcu_read_lock();
3259 owner = ACCESS_ONCE(event->owner);
3260
3261
3262
3263
3264
3265
3266 smp_read_barrier_depends();
3267 if (owner) {
3268
3269
3270
3271
3272
3273 get_task_struct(owner);
3274 }
3275 rcu_read_unlock();
3276
3277 if (owner) {
3278 mutex_lock(&owner->perf_event_mutex);
3279
3280
3281
3282
3283
3284
3285 if (event->owner)
3286 list_del_init(&event->owner_entry);
3287 mutex_unlock(&owner->perf_event_mutex);
3288 put_task_struct(owner);
3289 }
3290
3291 perf_event_release_kernel(event);
3292}
3293
3294static int perf_release(struct inode *inode, struct file *file)
3295{
3296 put_event(file->private_data);
3297 return 0;
3298}
3299
3300u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3301{
3302 struct perf_event *child;
3303 u64 total = 0;
3304
3305 *enabled = 0;
3306 *running = 0;
3307
3308 mutex_lock(&event->child_mutex);
3309 total += perf_event_read(event);
3310 *enabled += event->total_time_enabled +
3311 atomic64_read(&event->child_total_time_enabled);
3312 *running += event->total_time_running +
3313 atomic64_read(&event->child_total_time_running);
3314
3315 list_for_each_entry(child, &event->child_list, child_list) {
3316 total += perf_event_read(child);
3317 *enabled += child->total_time_enabled;
3318 *running += child->total_time_running;
3319 }
3320 mutex_unlock(&event->child_mutex);
3321
3322 return total;
3323}
3324EXPORT_SYMBOL_GPL(perf_event_read_value);
3325
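/*
 * Copy a PERF_FORMAT_GROUP read-out to user space: the number of
 * members, optional enabled/running times, then a value (and optional
 * id) for the leader and for each sibling.
 */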
3326static int perf_event_read_group(struct perf_event *event,
3327 u64 read_format, char __user *buf)
3328{
3329 struct perf_event *leader = event->group_leader, *sub;
3330 int n = 0, size = 0, ret = -EFAULT;
3331 struct perf_event_context *ctx = leader->ctx;
3332 u64 values[5];
3333 u64 count, enabled, running;
3334
3335 mutex_lock(&ctx->mutex);
3336 count = perf_event_read_value(leader, &enabled, &running);
3337
3338 values[n++] = 1 + leader->nr_siblings;
3339 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3340 values[n++] = enabled;
3341 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3342 values[n++] = running;
3343 values[n++] = count;
3344 if (read_format & PERF_FORMAT_ID)
3345 values[n++] = primary_event_id(leader);
3346
3347 size = n * sizeof(u64);
3348
3349 if (copy_to_user(buf, values, size))
3350 goto unlock;
3351
3352 ret = size;
3353
3354 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3355 n = 0;
3356
3357 values[n++] = perf_event_read_value(sub, &enabled, &running);
3358 if (read_format & PERF_FORMAT_ID)
3359 values[n++] = primary_event_id(sub);
3360
3361 size = n * sizeof(u64);
3362
3363 if (copy_to_user(buf + ret, values, size)) {
3364 ret = -EFAULT;
3365 goto unlock;
3366 }
3367
3368 ret += size;
3369 }
3370unlock:
3371 mutex_unlock(&ctx->mutex);
3372
3373 return ret;
3374}
3375
3376static int perf_event_read_one(struct perf_event *event,
3377 u64 read_format, char __user *buf)
3378{
3379 u64 enabled, running;
3380 u64 values[4];
3381 int n = 0;
3382
3383 values[n++] = perf_event_read_value(event, &enabled, &running);
3384 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3385 values[n++] = enabled;
3386 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3387 values[n++] = running;
3388 if (read_format & PERF_FORMAT_ID)
3389 values[n++] = primary_event_id(event);
3390
3391 if (copy_to_user(buf, values, n * sizeof(u64)))
3392 return -EFAULT;
3393
3394 return n * sizeof(u64);
3395}
3396
3397/*
3398 * Read the performance event - a simple non-blocking version for now.
3399 */
3400static ssize_t
3401perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3402{
3403 u64 read_format = event->attr.read_format;
3404 int ret;
3405
3406 /*
3407 * Return end-of-file for a read on an event that is in error state,
3408 * i.e. it was pinned but could not be scheduled on any CPU.
3409 */
3410
3411 if (event->state == PERF_EVENT_STATE_ERROR)
3412 return 0;
3413
3414 if (count < event->read_size)
3415 return -ENOSPC;
3416
3417 WARN_ON_ONCE(event->ctx->parent_ctx);
3418 if (read_format & PERF_FORMAT_GROUP)
3419 ret = perf_event_read_group(event, read_format, buf);
3420 else
3421 ret = perf_event_read_one(event, read_format, buf);
3422
3423 return ret;
3424}
3425
3426static ssize_t
3427perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3428{
3429 struct perf_event *event = file->private_data;
3430
3431 return perf_read_hw(event, buf, count);
3432}
3433
3434static unsigned int perf_poll(struct file *file, poll_table *wait)
3435{
3436 struct perf_event *event = file->private_data;
3437 struct ring_buffer *rb;
3438 unsigned int events = POLL_HUP;
3439
3440 /*
3441 * Take mmap_mutex so that a concurrent perf_event_set_output() cannot
3442 * swap event->rb out from under us while we read its poll state.
3443 */
3444 mutex_lock(&event->mmap_mutex);
3445 rb = event->rb;
3446 if (rb)
3447 events = atomic_xchg(&rb->poll, 0);
3448 mutex_unlock(&event->mmap_mutex);
3449
3450 poll_wait(file, &event->waitq, wait);
3451
3452 return events;
3453}
3454
3455static void perf_event_reset(struct perf_event *event)
3456{
3457 (void)perf_event_read(event);
3458 local64_set(&event->count, 0);
3459 perf_event_update_userpage(event);
3460}
3461
3462/*
3463 * Iterate an event and all of its inherited children.  Holding the
3464 * top-level event's child_mutex keeps the child list stable while we
3465 * walk it.
3466 */
3467
3468static void perf_event_for_each_child(struct perf_event *event,
3469 void (*func)(struct perf_event *))
3470{
3471 struct perf_event *child;
3472
3473 WARN_ON_ONCE(event->ctx->parent_ctx);
3474 mutex_lock(&event->child_mutex);
3475 func(event);
3476 list_for_each_entry(child, &event->child_list, child_list)
3477 func(child);
3478 mutex_unlock(&event->child_mutex);
3479}
3480
3481static void perf_event_for_each(struct perf_event *event,
3482 void (*func)(struct perf_event *))
3483{
3484 struct perf_event_context *ctx = event->ctx;
3485 struct perf_event *sibling;
3486
3487 WARN_ON_ONCE(ctx->parent_ctx);
3488 mutex_lock(&ctx->mutex);
3489 event = event->group_leader;
3490
3491 perf_event_for_each_child(event, func);
3492 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3493 perf_event_for_each_child(sibling, func);
3494 mutex_unlock(&ctx->mutex);
3495}
3496
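/*
 * PERF_EVENT_IOC_PERIOD: update the sample period, or the sample
 * frequency for freq based events, from a u64 supplied by user space.
 */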
3497static int perf_event_period(struct perf_event *event, u64 __user *arg)
3498{
3499 struct perf_event_context *ctx = event->ctx;
3500 int ret = 0;
3501 u64 value;
3502
3503 if (!is_sampling_event(event))
3504 return -EINVAL;
3505
3506 if (copy_from_user(&value, arg, sizeof(value)))
3507 return -EFAULT;
3508
3509 if (!value)
3510 return -EINVAL;
3511
3512 raw_spin_lock_irq(&ctx->lock);
3513 if (event->attr.freq) {
3514 if (value > sysctl_perf_event_sample_rate) {
3515 ret = -EINVAL;
3516 goto unlock;
3517 }
3518
3519 event->attr.sample_freq = value;
3520 } else {
3521 event->attr.sample_period = value;
3522 event->hw.sample_period = value;
3523 }
3524unlock:
3525 raw_spin_unlock_irq(&ctx->lock);
3526
3527 return ret;
3528}
3529
3530static const struct file_operations perf_fops;
3531
3532static inline int perf_fget_light(int fd, struct fd *p)
3533{
3534 struct fd f = fdget(fd);
3535 if (!f.file)
3536 return -EBADF;
3537
3538 if (f.file->f_op != &perf_fops) {
3539 fdput(f);
3540 return -EBADF;
3541 }
3542 *p = f;
3543 return 0;
3544}
3545
3546static int perf_event_set_output(struct perf_event *event,
3547 struct perf_event *output_event);
3548static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3549
3550static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3551{
3552 struct perf_event *event = file->private_data;
3553 void (*func)(struct perf_event *);
3554 u32 flags = arg;
3555
3556 switch (cmd) {
3557 case PERF_EVENT_IOC_ENABLE:
3558 func = perf_event_enable;
3559 break;
3560 case PERF_EVENT_IOC_DISABLE:
3561 func = perf_event_disable;
3562 break;
3563 case PERF_EVENT_IOC_RESET:
3564 func = perf_event_reset;
3565 break;
3566
3567 case PERF_EVENT_IOC_REFRESH:
3568 return perf_event_refresh(event, arg);
3569
3570 case PERF_EVENT_IOC_PERIOD:
3571 return perf_event_period(event, (u64 __user *)arg);
3572
3573 case PERF_EVENT_IOC_ID:
3574 {
3575 u64 id = primary_event_id(event);
3576
3577 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
3578 return -EFAULT;
3579 return 0;
3580 }
3581
3582 case PERF_EVENT_IOC_SET_OUTPUT:
3583 {
3584 int ret;
3585 if (arg != -1) {
3586 struct perf_event *output_event;
3587 struct fd output;
3588 ret = perf_fget_light(arg, &output);
3589 if (ret)
3590 return ret;
3591 output_event = output.file->private_data;
3592 ret = perf_event_set_output(event, output_event);
3593 fdput(output);
3594 } else {
3595 ret = perf_event_set_output(event, NULL);
3596 }
3597 return ret;
3598 }
3599
3600 case PERF_EVENT_IOC_SET_FILTER:
3601 return perf_event_set_filter(event, (void __user *)arg);
3602
3603 default:
3604 return -ENOTTY;
3605 }
3606
3607 if (flags & PERF_IOC_FLAG_GROUP)
3608 perf_event_for_each(event, func);
3609 else
3610 perf_event_for_each_child(event, func);
3611
3612 return 0;
3613}
3614
3615int perf_event_task_enable(void)
3616{
3617 struct perf_event *event;
3618
3619 mutex_lock(&current->perf_event_mutex);
3620 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3621 perf_event_for_each_child(event, perf_event_enable);
3622 mutex_unlock(&current->perf_event_mutex);
3623
3624 return 0;
3625}
3626
3627int perf_event_task_disable(void)
3628{
3629 struct perf_event *event;
3630
3631 mutex_lock(&current->perf_event_mutex);
3632 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3633 perf_event_for_each_child(event, perf_event_disable);
3634 mutex_unlock(&current->perf_event_mutex);
3635
3636 return 0;
3637}
3638
3639static int perf_event_index(struct perf_event *event)
3640{
3641 if (event->hw.state & PERF_HES_STOPPED)
3642 return 0;
3643
3644 if (event->state != PERF_EVENT_STATE_ACTIVE)
3645 return 0;
3646
3647 return event->pmu->event_idx(event);
3648}
3649
3650static void calc_timer_values(struct perf_event *event,
3651 u64 *now,
3652 u64 *enabled,
3653 u64 *running)
3654{
3655 u64 ctx_time;
3656
3657 *now = perf_clock();
3658 ctx_time = event->shadow_ctx_time + *now;
3659 *enabled = ctx_time - event->tstamp_enabled;
3660 *running = ctx_time - event->tstamp_running;
3661}
3662
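/*
 * One-time initialisation of the mmap()ed user page: advertise that the
 * deprecated cap_bit0 scheme is not used and publish the structure size.
 */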
3663static void perf_event_init_userpage(struct perf_event *event)
3664{
3665 struct perf_event_mmap_page *userpg;
3666 struct ring_buffer *rb;
3667
3668 rcu_read_lock();
3669 rb = rcu_dereference(event->rb);
3670 if (!rb)
3671 goto unlock;
3672
3673 userpg = rb->user_page;
3674
3675
3676 userpg->cap_bit0_is_deprecated = 1;
3677 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3678
3679unlock:
3680 rcu_read_unlock();
3681}
3682
3683void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3684{
3685}
3686
3687/*
3688 * Update the user-visible mmap() control page with the current index,
3689 * count and time values, using a seqcount-style lock word so user space
3690 * can detect a torn update.
3691 */
3692void perf_event_update_userpage(struct perf_event *event)
3693{
3694 struct perf_event_mmap_page *userpg;
3695 struct ring_buffer *rb;
3696 u64 enabled, running, now;
3697
3698 rcu_read_lock();
3699 rb = rcu_dereference(event->rb);
3700 if (!rb)
3701 goto unlock;
3702
3703 /*
3704 * Compute the enabled/running times from snapshot values taken when
3705 * the event was last scheduled in.  We cannot call
3706 * update_context_time() here because taking ctx->lock is not safe in
3707 * every context this can be reached from.
3708 */
3709
3710
3711
3712 calc_timer_values(event, &now, &enabled, &running);
3713
3714 userpg = rb->user_page;
3715
3716
3717
3718
3719 preempt_disable();
3720 ++userpg->lock;
3721 barrier();
3722 userpg->index = perf_event_index(event);
3723 userpg->offset = perf_event_count(event);
3724 if (userpg->index)
3725 userpg->offset -= local64_read(&event->hw.prev_count);
3726
3727 userpg->time_enabled = enabled +
3728 atomic64_read(&event->child_total_time_enabled);
3729
3730 userpg->time_running = running +
3731 atomic64_read(&event->child_total_time_running);
3732
3733 arch_perf_update_userpage(userpg, now);
3734
3735 barrier();
3736 ++userpg->lock;
3737 preempt_enable();
3738unlock:
3739 rcu_read_unlock();
3740}
3741
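/*
 * Fault handler for the mmap()ed ring buffer: hand out the user page for
 * offset zero and the data pages for the remaining offsets.
 */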
3742static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3743{
3744 struct perf_event *event = vma->vm_file->private_data;
3745 struct ring_buffer *rb;
3746 int ret = VM_FAULT_SIGBUS;
3747
3748 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3749 if (vmf->pgoff == 0)
3750 ret = 0;
3751 return ret;
3752 }
3753
3754 rcu_read_lock();
3755 rb = rcu_dereference(event->rb);
3756 if (!rb)
3757 goto unlock;
3758
3759 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3760 goto unlock;
3761
3762 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3763 if (!vmf->page)
3764 goto unlock;
3765
3766 get_page(vmf->page);
3767 vmf->page->mapping = vma->vm_file->f_mapping;
3768 vmf->page->index = vmf->pgoff;
3769
3770 ret = 0;
3771unlock:
3772 rcu_read_unlock();
3773
3774 return ret;
3775}
3776
3777static void ring_buffer_attach(struct perf_event *event,
3778 struct ring_buffer *rb)
3779{
3780 unsigned long flags;
3781
3782 if (!list_empty(&event->rb_entry))
3783 return;
3784
3785 spin_lock_irqsave(&rb->event_lock, flags);
3786 if (list_empty(&event->rb_entry))
3787 list_add(&event->rb_entry, &rb->event_list);
3788 spin_unlock_irqrestore(&rb->event_lock, flags);
3789}
3790
3791static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
3792{
3793 unsigned long flags;
3794
3795 if (list_empty(&event->rb_entry))
3796 return;
3797
3798 spin_lock_irqsave(&rb->event_lock, flags);
3799 list_del_init(&event->rb_entry);
3800 wake_up_all(&event->waitq);
3801 spin_unlock_irqrestore(&rb->event_lock, flags);
3802}
3803
3804static void ring_buffer_wakeup(struct perf_event *event)
3805{
3806 struct ring_buffer *rb;
3807
3808 rcu_read_lock();
3809 rb = rcu_dereference(event->rb);
3810 if (rb) {
3811 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3812 wake_up_all(&event->waitq);
3813 }
3814 rcu_read_unlock();
3815}
3816
3817static void rb_free_rcu(struct rcu_head *rcu_head)
3818{
3819 struct ring_buffer *rb;
3820
3821 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3822 rb_free(rb);
3823}
3824
3825static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3826{
3827 struct ring_buffer *rb;
3828
3829 rcu_read_lock();
3830 rb = rcu_dereference(event->rb);
3831 if (rb) {
3832 if (!atomic_inc_not_zero(&rb->refcount))
3833 rb = NULL;
3834 }
3835 rcu_read_unlock();
3836
3837 return rb;
3838}
3839
3840static void ring_buffer_put(struct ring_buffer *rb)
3841{
3842 if (!atomic_dec_and_test(&rb->refcount))
3843 return;
3844
3845 WARN_ON_ONCE(!list_empty(&rb->event_list));
3846
3847 call_rcu(&rb->rcu_head, rb_free_rcu);
3848}
3849
3850static void perf_mmap_open(struct vm_area_struct *vma)
3851{
3852 struct perf_event *event = vma->vm_file->private_data;
3853
3854 atomic_inc(&event->mmap_count);
3855 atomic_inc(&event->rb->mmap_count);
3856}
3857
3858/*
3859 * A buffer can be mmap()ed multiple times; either directly through the same
3860 * event, or through other events by use of perf_event_set_output().
3861 *
3862 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3863 * the ring buffer, and detach all events redirecting into it, once the last
3864 * mmap() reference is gone.
3865 */
3866static void perf_mmap_close(struct vm_area_struct *vma)
3867{
3868 struct perf_event *event = vma->vm_file->private_data;
3869
3870 struct ring_buffer *rb = event->rb;
3871 struct user_struct *mmap_user = rb->mmap_user;
3872 int mmap_locked = rb->mmap_locked;
3873 unsigned long size = perf_data_size(rb);
3874
3875 atomic_dec(&rb->mmap_count);
3876
3877 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3878 return;
3879
3880
3881 rcu_assign_pointer(event->rb, NULL);
3882 ring_buffer_detach(event, rb);
3883 mutex_unlock(&event->mmap_mutex);
3884
3885
3886 if (atomic_read(&rb->mmap_count)) {
3887 ring_buffer_put(rb);
3888 return;
3889 }
3890 /*
3891 * No other mmap()s remain; detach every event that still redirects its
3892 * output into this now unreachable buffer so that it can be freed and
3893 * the locked-memory accounting below can be undone.
3894 */
3895
3896again:
3897 rcu_read_lock();
3898 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3899 if (!atomic_long_inc_not_zero(&event->refcount)) {
3900
3901
3902
3903
3904 continue;
3905 }
3906 rcu_read_unlock();
3907
3908 mutex_lock(&event->mmap_mutex);
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919 if (event->rb == rb) {
3920 rcu_assign_pointer(event->rb, NULL);
3921 ring_buffer_detach(event, rb);
3922 ring_buffer_put(rb);
3923 }
3924 mutex_unlock(&event->mmap_mutex);
3925 put_event(event);
3926
3927
3928
3929
3930
3931 goto again;
3932 }
3933 rcu_read_unlock();
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3945 vma->vm_mm->pinned_vm -= mmap_locked;
3946 free_uid(mmap_user);
3947
3948 ring_buffer_put(rb);
3949}
3950
3951static const struct vm_operations_struct perf_mmap_vmops = {
3952 .open = perf_mmap_open,
3953 .close = perf_mmap_close,
3954 .fault = perf_mmap_fault,
3955 .page_mkwrite = perf_mmap_fault,
3956};
3957
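/*
 * mmap() handler: attach to an existing ring buffer of the same size, or
 * allocate a new one, charging the pinned pages against the user's
 * perf_event_mlock allowance and RLIMIT_MEMLOCK.
 */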
3958static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3959{
3960 struct perf_event *event = file->private_data;
3961 unsigned long user_locked, user_lock_limit;
3962 struct user_struct *user = current_user();
3963 unsigned long locked, lock_limit;
3964 struct ring_buffer *rb;
3965 unsigned long vma_size;
3966 unsigned long nr_pages;
3967 long user_extra, extra;
3968 int ret = 0, flags = 0;
3969
3970
3971
3972
3973
3974
3975 if (event->cpu == -1 && event->attr.inherit)
3976 return -EINVAL;
3977
3978 if (!(vma->vm_flags & VM_SHARED))
3979 return -EINVAL;
3980
3981 vma_size = vma->vm_end - vma->vm_start;
3982 nr_pages = (vma_size / PAGE_SIZE) - 1;
3983
3984
3985
3986
3987
3988 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3989 return -EINVAL;
3990
3991 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3992 return -EINVAL;
3993
3994 if (vma->vm_pgoff != 0)
3995 return -EINVAL;
3996
3997 WARN_ON_ONCE(event->ctx->parent_ctx);
3998again:
3999 mutex_lock(&event->mmap_mutex);
4000 if (event->rb) {
4001 if (event->rb->nr_pages != nr_pages) {
4002 ret = -EINVAL;
4003 goto unlock;
4004 }
4005
4006 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4007
4008
4009
4010
4011
4012 mutex_unlock(&event->mmap_mutex);
4013 goto again;
4014 }
4015
4016 goto unlock;
4017 }
4018
4019 user_extra = nr_pages + 1;
4020 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
4021
4022
4023
4024
4025 user_lock_limit *= num_online_cpus();
4026
4027 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4028
4029 extra = 0;
4030 if (user_locked > user_lock_limit)
4031 extra = user_locked - user_lock_limit;
4032
4033 lock_limit = rlimit(RLIMIT_MEMLOCK);
4034 lock_limit >>= PAGE_SHIFT;
4035 locked = vma->vm_mm->pinned_vm + extra;
4036
4037 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4038 !capable(CAP_IPC_LOCK)) {
4039 ret = -EPERM;
4040 goto unlock;
4041 }
4042
4043 WARN_ON(event->rb);
4044
4045 if (vma->vm_flags & VM_WRITE)
4046 flags |= RING_BUFFER_WRITABLE;
4047
4048 rb = rb_alloc(nr_pages,
4049 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4050 event->cpu, flags);
4051
4052 if (!rb) {
4053 ret = -ENOMEM;
4054 goto unlock;
4055 }
4056
4057 atomic_set(&rb->mmap_count, 1);
4058 rb->mmap_locked = extra;
4059 rb->mmap_user = get_current_user();
4060
4061 atomic_long_add(user_extra, &user->locked_vm);
4062 vma->vm_mm->pinned_vm += extra;
4063
4064 ring_buffer_attach(event, rb);
4065 rcu_assign_pointer(event->rb, rb);
4066
4067 perf_event_init_userpage(event);
4068 perf_event_update_userpage(event);
4069
4070unlock:
4071 if (!ret)
4072 atomic_inc(&event->mmap_count);
4073 mutex_unlock(&event->mmap_mutex);
4074
4075
4076
4077
4078
4079 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4080 vma->vm_ops = &perf_mmap_vmops;
4081
4082 return ret;
4083}
4084
4085static int perf_fasync(int fd, struct file *filp, int on)
4086{
4087 struct inode *inode = file_inode(filp);
4088 struct perf_event *event = filp->private_data;
4089 int retval;
4090
4091 mutex_lock(&inode->i_mutex);
4092 retval = fasync_helper(fd, filp, on, &event->fasync);
4093 mutex_unlock(&inode->i_mutex);
4094
4095 if (retval < 0)
4096 return retval;
4097
4098 return 0;
4099}
4100
4101static const struct file_operations perf_fops = {
4102 .llseek = no_llseek,
4103 .release = perf_release,
4104 .read = perf_read,
4105 .poll = perf_poll,
4106 .unlocked_ioctl = perf_ioctl,
4107 .compat_ioctl = perf_ioctl,
4108 .mmap = perf_mmap,
4109 .fasync = perf_fasync,
4110};
4111
4112
4113
4114
4115
4116
4117
4118
4119void perf_event_wakeup(struct perf_event *event)
4120{
4121 ring_buffer_wakeup(event);
4122
4123 if (event->pending_kill) {
4124 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4125 event->pending_kill = 0;
4126 }
4127}
4128
4129static void perf_pending_event(struct irq_work *entry)
4130{
4131 struct perf_event *event = container_of(entry,
4132 struct perf_event, pending);
4133
4134 if (event->pending_disable) {
4135 event->pending_disable = 0;
4136 __perf_event_disable(event);
4137 }
4138
4139 if (event->pending_wakeup) {
4140 event->pending_wakeup = 0;
4141 perf_event_wakeup(event);
4142 }
4143}
4144
4145
4146
4147
4148
4149
4150struct perf_guest_info_callbacks *perf_guest_cbs;
4151
4152int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4153{
4154 perf_guest_cbs = cbs;
4155 return 0;
4156}
4157EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4158
4159int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4160{
4161 perf_guest_cbs = NULL;
4162 return 0;
4163}
4164EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4165
4166static void
4167perf_output_sample_regs(struct perf_output_handle *handle,
4168 struct pt_regs *regs, u64 mask)
4169{
4170 int bit;
4171
4172 for_each_set_bit(bit, (const unsigned long *) &mask,
4173 sizeof(mask) * BITS_PER_BYTE) {
4174 u64 val;
4175
4176 val = perf_reg_value(regs, bit);
4177 perf_output_put(handle, val);
4178 }
4179}
4180
4181static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4182 struct pt_regs *regs)
4183{
4184 if (!user_mode(regs)) {
4185 if (current->mm)
4186 regs = task_pt_regs(current);
4187 else
4188 regs = NULL;
4189 }
4190
4191 if (regs) {
4192 regs_user->regs = regs;
4193 regs_user->abi = perf_reg_abi(current);
4194 }
4195}
4196
4197
4198
4199
4200
4201
4202
4203
4204static u64 perf_ustack_task_size(struct pt_regs *regs)
4205{
4206 unsigned long addr = perf_user_stack_pointer(regs);
4207
4208 if (!addr || addr >= TASK_SIZE)
4209 return 0;
4210
4211 return TASK_SIZE - addr;
4212}
4213
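/*
 * Clamp the requested user-stack dump size so that it does not exceed
 * the remaining user stack and the sample record size still fits in the
 * 16-bit header size field.
 */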
4214static u16
4215perf_sample_ustack_size(u16 stack_size, u16 header_size,
4216 struct pt_regs *regs)
4217{
4218 u64 task_size;
4219
4220
4221 if (!regs)
4222 return 0;
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4235 stack_size = min(stack_size, (u16) task_size);
4236
4237
4238 header_size += 2 * sizeof(u64);
4239
4240
4241 if ((u16) (header_size + stack_size) < header_size) {
4242
4243
4244
4245
4246 stack_size = USHRT_MAX - header_size - sizeof(u64);
4247 stack_size = round_up(stack_size, sizeof(u64));
4248 }
4249
4250 return stack_size;
4251}
4252
4253static void
4254perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4255 struct pt_regs *regs)
4256{
4257
4258 if (!regs) {
4259 u64 size = 0;
4260 perf_output_put(handle, size);
4261 } else {
4262 unsigned long sp;
4263 unsigned int rem;
4264 u64 dyn_size;
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278 perf_output_put(handle, dump_size);
4279
4280
4281 sp = perf_user_stack_pointer(regs);
4282 rem = __output_copy_user(handle, (void *) sp, dump_size);
4283 dyn_size = dump_size - rem;
4284
4285 perf_output_skip(handle, rem);
4286
4287
4288 perf_output_put(handle, dyn_size);
4289 }
4290}
4291
4292static void __perf_event_header__init_id(struct perf_event_header *header,
4293 struct perf_sample_data *data,
4294 struct perf_event *event)
4295{
4296 u64 sample_type = event->attr.sample_type;
4297
4298 data->type = sample_type;
4299 header->size += event->id_header_size;
4300
4301 if (sample_type & PERF_SAMPLE_TID) {
4302
4303 data->tid_entry.pid = perf_event_pid(event, current);
4304 data->tid_entry.tid = perf_event_tid(event, current);
4305 }
4306
4307 if (sample_type & PERF_SAMPLE_TIME)
4308 data->time = perf_clock();
4309
4310 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
4311 data->id = primary_event_id(event);
4312
4313 if (sample_type & PERF_SAMPLE_STREAM_ID)
4314 data->stream_id = event->id;
4315
4316 if (sample_type & PERF_SAMPLE_CPU) {
4317 data->cpu_entry.cpu = raw_smp_processor_id();
4318 data->cpu_entry.reserved = 0;
4319 }
4320}
4321
4322void perf_event_header__init_id(struct perf_event_header *header,
4323 struct perf_sample_data *data,
4324 struct perf_event *event)
4325{
4326 if (event->attr.sample_id_all)
4327 __perf_event_header__init_id(header, data, event);
4328}
4329
4330static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4331 struct perf_sample_data *data)
4332{
4333 u64 sample_type = data->type;
4334
4335 if (sample_type & PERF_SAMPLE_TID)
4336 perf_output_put(handle, data->tid_entry);
4337
4338 if (sample_type & PERF_SAMPLE_TIME)
4339 perf_output_put(handle, data->time);
4340
4341 if (sample_type & PERF_SAMPLE_ID)
4342 perf_output_put(handle, data->id);
4343
4344 if (sample_type & PERF_SAMPLE_STREAM_ID)
4345 perf_output_put(handle, data->stream_id);
4346
4347 if (sample_type & PERF_SAMPLE_CPU)
4348 perf_output_put(handle, data->cpu_entry);
4349
4350 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4351 perf_output_put(handle, data->id);
4352}
4353
4354void perf_event__output_id_sample(struct perf_event *event,
4355 struct perf_output_handle *handle,
4356 struct perf_sample_data *sample)
4357{
4358 if (event->attr.sample_id_all)
4359 __perf_event__output_id_sample(handle, sample);
4360}
4361
4362static void perf_output_read_one(struct perf_output_handle *handle,
4363 struct perf_event *event,
4364 u64 enabled, u64 running)
4365{
4366 u64 read_format = event->attr.read_format;
4367 u64 values[4];
4368 int n = 0;
4369
4370 values[n++] = perf_event_count(event);
4371 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4372 values[n++] = enabled +
4373 atomic64_read(&event->child_total_time_enabled);
4374 }
4375 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4376 values[n++] = running +
4377 atomic64_read(&event->child_total_time_running);
4378 }
4379 if (read_format & PERF_FORMAT_ID)
4380 values[n++] = primary_event_id(event);
4381
4382 __output_copy(handle, values, n * sizeof(u64));
4383}
4384
4385
4386
4387
4388static void perf_output_read_group(struct perf_output_handle *handle,
4389 struct perf_event *event,
4390 u64 enabled, u64 running)
4391{
4392 struct perf_event *leader = event->group_leader, *sub;
4393 u64 read_format = event->attr.read_format;
4394 u64 values[5];
4395 int n = 0;
4396
4397 values[n++] = 1 + leader->nr_siblings;
4398
4399 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4400 values[n++] = enabled;
4401
4402 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4403 values[n++] = running;
4404
4405 if (leader != event)
4406 leader->pmu->read(leader);
4407
4408 values[n++] = perf_event_count(leader);
4409 if (read_format & PERF_FORMAT_ID)
4410 values[n++] = primary_event_id(leader);
4411
4412 __output_copy(handle, values, n * sizeof(u64));
4413
4414 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4415 n = 0;
4416
4417 if ((sub != event) &&
4418 (sub->state == PERF_EVENT_STATE_ACTIVE))
4419 sub->pmu->read(sub);
4420
4421 values[n++] = perf_event_count(sub);
4422 if (read_format & PERF_FORMAT_ID)
4423 values[n++] = primary_event_id(sub);
4424
4425 __output_copy(handle, values, n * sizeof(u64));
4426 }
4427}
4428
4429#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4430 PERF_FORMAT_TOTAL_TIME_RUNNING)
4431
4432static void perf_output_read(struct perf_output_handle *handle,
4433 struct perf_event *event)
4434{
4435 u64 enabled = 0, running = 0, now;
4436 u64 read_format = event->attr.read_format;
4437
4438 /*
4439 * Compute the enabled/running times from snapshot values taken when
4440 * the event was last scheduled in; we are in the overflow path here
4441 * and cannot take ctx->lock to call update_context_time().
4442 */
4443
4444
4445
4446
4447 if (read_format & PERF_FORMAT_TOTAL_TIMES)
4448 calc_timer_values(event, &now, &enabled, &running);
4449
4450 if (event->attr.read_format & PERF_FORMAT_GROUP)
4451 perf_output_read_group(handle, event, enabled, running);
4452 else
4453 perf_output_read_one(handle, event, enabled, running);
4454}
4455
4456void perf_output_sample(struct perf_output_handle *handle,
4457 struct perf_event_header *header,
4458 struct perf_sample_data *data,
4459 struct perf_event *event)
4460{
4461 u64 sample_type = data->type;
4462
4463 perf_output_put(handle, *header);
4464
4465 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4466 perf_output_put(handle, data->id);
4467
4468 if (sample_type & PERF_SAMPLE_IP)
4469 perf_output_put(handle, data->ip);
4470
4471 if (sample_type & PERF_SAMPLE_TID)
4472 perf_output_put(handle, data->tid_entry);
4473
4474 if (sample_type & PERF_SAMPLE_TIME)
4475 perf_output_put(handle, data->time);
4476
4477 if (sample_type & PERF_SAMPLE_ADDR)
4478 perf_output_put(handle, data->addr);
4479
4480 if (sample_type & PERF_SAMPLE_ID)
4481 perf_output_put(handle, data->id);
4482
4483 if (sample_type & PERF_SAMPLE_STREAM_ID)
4484 perf_output_put(handle, data->stream_id);
4485
4486 if (sample_type & PERF_SAMPLE_CPU)
4487 perf_output_put(handle, data->cpu_entry);
4488
4489 if (sample_type & PERF_SAMPLE_PERIOD)
4490 perf_output_put(handle, data->period);
4491
4492 if (sample_type & PERF_SAMPLE_READ)
4493 perf_output_read(handle, event);
4494
4495 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4496 if (data->callchain) {
4497 int size = 1;
4498
4499 if (data->callchain)
4500 size += data->callchain->nr;
4501
4502 size *= sizeof(u64);
4503
4504 __output_copy(handle, data->callchain, size);
4505 } else {
4506 u64 nr = 0;
4507 perf_output_put(handle, nr);
4508 }
4509 }
4510
4511 if (sample_type & PERF_SAMPLE_RAW) {
4512 if (data->raw) {
4513 perf_output_put(handle, data->raw->size);
4514 __output_copy(handle, data->raw->data,
4515 data->raw->size);
4516 } else {
4517 struct {
4518 u32 size;
4519 u32 data;
4520 } raw = {
4521 .size = sizeof(u32),
4522 .data = 0,
4523 };
4524 perf_output_put(handle, raw);
4525 }
4526 }
4527
4528 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4529 if (data->br_stack) {
4530 size_t size;
4531
4532 size = data->br_stack->nr
4533 * sizeof(struct perf_branch_entry);
4534
4535 perf_output_put(handle, data->br_stack->nr);
4536 perf_output_copy(handle, data->br_stack->entries, size);
4537 } else {
4538
4539
4540
4541 u64 nr = 0;
4542 perf_output_put(handle, nr);
4543 }
4544 }
4545
4546 if (sample_type & PERF_SAMPLE_REGS_USER) {
4547 u64 abi = data->regs_user.abi;
4548
4549 /*
4550 * If there were no registers to dump, user space notices it through
4551 * an abi value of PERF_SAMPLE_REGS_ABI_NONE (i.e. 0).
4552 */
4553 perf_output_put(handle, abi);
4554
4555 if (abi) {
4556 u64 mask = event->attr.sample_regs_user;
4557 perf_output_sample_regs(handle,
4558 data->regs_user.regs,
4559 mask);
4560 }
4561 }
4562
4563 if (sample_type & PERF_SAMPLE_STACK_USER) {
4564 perf_output_sample_ustack(handle,
4565 data->stack_user_size,
4566 data->regs_user.regs);
4567 }
4568
4569 if (sample_type & PERF_SAMPLE_WEIGHT)
4570 perf_output_put(handle, data->weight);
4571
4572 if (sample_type & PERF_SAMPLE_DATA_SRC)
4573 perf_output_put(handle, data->data_src.val);
4574
4575 if (!event->attr.watermark) {
4576 int wakeup_events = event->attr.wakeup_events;
4577
4578 if (wakeup_events) {
4579 struct ring_buffer *rb = handle->rb;
4580 int events = local_inc_return(&rb->events);
4581
4582 if (events >= wakeup_events) {
4583 local_sub(wakeup_events, &rb->events);
4584 local_inc(&rb->wakeup);
4585 }
4586 }
4587 }
4588}
4589
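/*
 * Work out the final record size for every requested sample field and
 * capture the data (IP, callchain, user regs, ...) that must be gathered
 * at overflow time.
 */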
4590void perf_prepare_sample(struct perf_event_header *header,
4591 struct perf_sample_data *data,
4592 struct perf_event *event,
4593 struct pt_regs *regs)
4594{
4595 u64 sample_type = event->attr.sample_type;
4596
4597 header->type = PERF_RECORD_SAMPLE;
4598 header->size = sizeof(*header) + event->header_size;
4599
4600 header->misc = 0;
4601 header->misc |= perf_misc_flags(regs);
4602
4603 __perf_event_header__init_id(header, data, event);
4604
4605 if (sample_type & PERF_SAMPLE_IP)
4606 data->ip = perf_instruction_pointer(regs);
4607
4608 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4609 int size = 1;
4610
4611 data->callchain = perf_callchain(event, regs);
4612
4613 if (data->callchain)
4614 size += data->callchain->nr;
4615
4616 header->size += size * sizeof(u64);
4617 }
4618
4619 if (sample_type & PERF_SAMPLE_RAW) {
4620 int size = sizeof(u32);
4621
4622 if (data->raw)
4623 size += data->raw->size;
4624 else
4625 size += sizeof(u32);
4626
4627 WARN_ON_ONCE(size & (sizeof(u64)-1));
4628 header->size += size;
4629 }
4630
4631 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4632 int size = sizeof(u64);
4633 if (data->br_stack) {
4634 size += data->br_stack->nr
4635 * sizeof(struct perf_branch_entry);
4636 }
4637 header->size += size;
4638 }
4639
4640 if (sample_type & PERF_SAMPLE_REGS_USER) {
4641
4642 int size = sizeof(u64);
4643
4644 perf_sample_regs_user(&data->regs_user, regs);
4645
4646 if (data->regs_user.regs) {
4647 u64 mask = event->attr.sample_regs_user;
4648 size += hweight64(mask) * sizeof(u64);
4649 }
4650
4651 header->size += size;
4652 }
4653
4654 if (sample_type & PERF_SAMPLE_STACK_USER) {
4655 /*
4656 * A user-stack dump is recorded as a u64 requested size, the stack
4657 * data itself, and a trailing u64 with the size actually dumped; the
4658 * requested size is clamped so the record fits the u16 header size.
4659 */
4660
4661 struct perf_regs_user *uregs = &data->regs_user;
4662 u16 stack_size = event->attr.sample_stack_user;
4663 u16 size = sizeof(u64);
4664
4665 if (!uregs->abi)
4666 perf_sample_regs_user(uregs, regs);
4667
4668 stack_size = perf_sample_ustack_size(stack_size, header->size,
4669 uregs->regs);
4670
4671
4672
4673
4674
4675
4676 if (stack_size)
4677 size += sizeof(u64) + stack_size;
4678
4679 data->stack_user_size = stack_size;
4680 header->size += size;
4681 }
4682}
4683
4684static void perf_event_output(struct perf_event *event,
4685 struct perf_sample_data *data,
4686 struct pt_regs *regs)
4687{
4688 struct perf_output_handle handle;
4689 struct perf_event_header header;
4690
4691
4692 rcu_read_lock();
4693
4694 perf_prepare_sample(&header, data, event, regs);
4695
4696 if (perf_output_begin(&handle, event, header.size))
4697 goto exit;
4698
4699 perf_output_sample(&handle, &header, data, event);
4700
4701 perf_output_end(&handle);
4702
4703exit:
4704 rcu_read_unlock();
4705}
4706
4707
4708
4709
4710
4711struct perf_read_event {
4712 struct perf_event_header header;
4713
4714 u32 pid;
4715 u32 tid;
4716};
4717
4718static void
4719perf_event_read_event(struct perf_event *event,
4720 struct task_struct *task)
4721{
4722 struct perf_output_handle handle;
4723 struct perf_sample_data sample;
4724 struct perf_read_event read_event = {
4725 .header = {
4726 .type = PERF_RECORD_READ,
4727 .misc = 0,
4728 .size = sizeof(read_event) + event->read_size,
4729 },
4730 .pid = perf_event_pid(event, task),
4731 .tid = perf_event_tid(event, task),
4732 };
4733 int ret;
4734
4735 perf_event_header__init_id(&read_event.header, &sample, event);
4736 ret = perf_output_begin(&handle, event, read_event.header.size);
4737 if (ret)
4738 return;
4739
4740 perf_output_put(&handle, read_event);
4741 perf_output_read(&handle, event);
4742 perf_event__output_id_sample(event, &handle, &sample);
4743
4744 perf_output_end(&handle);
4745}
4746
4747typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4748
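/*
 * Feed a side-band record to every event in @ctx that is at least in
 * INACTIVE state and passes the CPU/cgroup filter.
 */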
4749static void
4750perf_event_aux_ctx(struct perf_event_context *ctx,
4751 perf_event_aux_output_cb output,
4752 void *data)
4753{
4754 struct perf_event *event;
4755
4756 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4757 if (event->state < PERF_EVENT_STATE_INACTIVE)
4758 continue;
4759 if (!event_filter_match(event))
4760 continue;
4761 output(event, data);
4762 }
4763}
4764
4765static void
4766perf_event_aux(perf_event_aux_output_cb output, void *data,
4767 struct perf_event_context *task_ctx)
4768{
4769 struct perf_cpu_context *cpuctx;
4770 struct perf_event_context *ctx;
4771 struct pmu *pmu;
4772 int ctxn;
4773
4774 rcu_read_lock();
4775 list_for_each_entry_rcu(pmu, &pmus, entry) {
4776 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4777 if (cpuctx->unique_pmu != pmu)
4778 goto next;
4779 perf_event_aux_ctx(&cpuctx->ctx, output, data);
4780 if (task_ctx)
4781 goto next;
4782 ctxn = pmu->task_ctx_nr;
4783 if (ctxn < 0)
4784 goto next;
4785 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4786 if (ctx)
4787 perf_event_aux_ctx(ctx, output, data);
4788next:
4789 put_cpu_ptr(pmu->pmu_cpu_context);
4790 }
4791
4792 if (task_ctx) {
4793 preempt_disable();
4794 perf_event_aux_ctx(task_ctx, output, data);
4795 preempt_enable();
4796 }
4797 rcu_read_unlock();
4798}
4799
4800/*
4801 * task tracking -- fork/exit
4802 *
4803 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
4804 */
4805
4806struct perf_task_event {
4807 struct task_struct *task;
4808 struct perf_event_context *task_ctx;
4809
4810 struct {
4811 struct perf_event_header header;
4812
4813 u32 pid;
4814 u32 ppid;
4815 u32 tid;
4816 u32 ptid;
4817 u64 time;
4818 } event_id;
4819};
4820
4821static int perf_event_task_match(struct perf_event *event)
4822{
4823 return event->attr.comm || event->attr.mmap ||
4824 event->attr.mmap2 || event->attr.mmap_data ||
4825 event->attr.task;
4826}
4827
4828static void perf_event_task_output(struct perf_event *event,
4829 void *data)
4830{
4831 struct perf_task_event *task_event = data;
4832 struct perf_output_handle handle;
4833 struct perf_sample_data sample;
4834 struct task_struct *task = task_event->task;
4835 int ret, size = task_event->event_id.header.size;
4836
4837 if (!perf_event_task_match(event))
4838 return;
4839
4840 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4841
4842 ret = perf_output_begin(&handle, event,
4843 task_event->event_id.header.size);
4844 if (ret)
4845 goto out;
4846
4847 task_event->event_id.pid = perf_event_pid(event, task);
4848 task_event->event_id.ppid = perf_event_pid(event, current);
4849
4850 task_event->event_id.tid = perf_event_tid(event, task);
4851 task_event->event_id.ptid = perf_event_tid(event, current);
4852
4853 perf_output_put(&handle, task_event->event_id);
4854
4855 perf_event__output_id_sample(event, &handle, &sample);
4856
4857 perf_output_end(&handle);
4858out:
4859 task_event->event_id.header.size = size;
4860}
4861
4862static void perf_event_task(struct task_struct *task,
4863 struct perf_event_context *task_ctx,
4864 int new)
4865{
4866 struct perf_task_event task_event;
4867
4868 if (!atomic_read(&nr_comm_events) &&
4869 !atomic_read(&nr_mmap_events) &&
4870 !atomic_read(&nr_task_events))
4871 return;
4872
4873 task_event = (struct perf_task_event){
4874 .task = task,
4875 .task_ctx = task_ctx,
4876 .event_id = {
4877 .header = {
4878 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4879 .misc = 0,
4880 .size = sizeof(task_event.event_id),
4881 },
4882
4883
4884
4885
4886 .time = perf_clock(),
4887 },
4888 };
4889
4890 perf_event_aux(perf_event_task_output,
4891 &task_event,
4892 task_ctx);
4893}
4894
4895void perf_event_fork(struct task_struct *task)
4896{
4897 perf_event_task(task, NULL, 1);
4898}
4899
4900/*
4901 * comm tracking
4902 */
4903
4904struct perf_comm_event {
4905 struct task_struct *task;
4906 char *comm;
4907 int comm_size;
4908
4909 struct {
4910 struct perf_event_header header;
4911
4912 u32 pid;
4913 u32 tid;
4914 } event_id;
4915};
4916
4917static int perf_event_comm_match(struct perf_event *event)
4918{
4919 return event->attr.comm;
4920}
4921
4922static void perf_event_comm_output(struct perf_event *event,
4923 void *data)
4924{
4925 struct perf_comm_event *comm_event = data;
4926 struct perf_output_handle handle;
4927 struct perf_sample_data sample;
4928 int size = comm_event->event_id.header.size;
4929 int ret;
4930
4931 if (!perf_event_comm_match(event))
4932 return;
4933
4934 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4935 ret = perf_output_begin(&handle, event,
4936 comm_event->event_id.header.size);
4937
4938 if (ret)
4939 goto out;
4940
4941 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4942 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4943
4944 perf_output_put(&handle, comm_event->event_id);
4945 __output_copy(&handle, comm_event->comm,
4946 comm_event->comm_size);
4947
4948 perf_event__output_id_sample(event, &handle, &sample);
4949
4950 perf_output_end(&handle);
4951out:
4952 comm_event->event_id.header.size = size;
4953}
4954
4955static void perf_event_comm_event(struct perf_comm_event *comm_event)
4956{
4957 char comm[TASK_COMM_LEN];
4958 unsigned int size;
4959
4960 memset(comm, 0, sizeof(comm));
4961 strlcpy(comm, comm_event->task->comm, sizeof(comm));
4962 size = ALIGN(strlen(comm)+1, sizeof(u64));
4963
4964 comm_event->comm = comm;
4965 comm_event->comm_size = size;
4966
4967 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4968
4969 perf_event_aux(perf_event_comm_output,
4970 comm_event,
4971 NULL);
4972}
4973
4974void perf_event_comm(struct task_struct *task)
4975{
4976 struct perf_comm_event comm_event;
4977 struct perf_event_context *ctx;
4978 int ctxn;
4979
4980 rcu_read_lock();
4981 for_each_task_context_nr(ctxn) {
4982 ctx = task->perf_event_ctxp[ctxn];
4983 if (!ctx)
4984 continue;
4985
4986 perf_event_enable_on_exec(ctx);
4987 }
4988 rcu_read_unlock();
4989
4990 if (!atomic_read(&nr_comm_events))
4991 return;
4992
4993 comm_event = (struct perf_comm_event){
4994 .task = task,
4995
4996
4997 .event_id = {
4998 .header = {
4999 .type = PERF_RECORD_COMM,
5000 .misc = 0,
5001
5002 },
5003
5004
5005 },
5006 };
5007
5008 perf_event_comm_event(&comm_event);
5009}
5010
5011/*
5012 * mmap tracking
5013 */
5014
5015struct perf_mmap_event {
5016 struct vm_area_struct *vma;
5017
5018 const char *file_name;
5019 int file_size;
5020 int maj, min;
5021 u64 ino;
5022 u64 ino_generation;
5023
5024 struct {
5025 struct perf_event_header header;
5026
5027 u32 pid;
5028 u32 tid;
5029 u64 start;
5030 u64 len;
5031 u64 pgoff;
5032 } event_id;
5033};
5034
5035static int perf_event_mmap_match(struct perf_event *event,
5036 void *data)
5037{
5038 struct perf_mmap_event *mmap_event = data;
5039 struct vm_area_struct *vma = mmap_event->vma;
5040 int executable = vma->vm_flags & VM_EXEC;
5041
5042 return (!executable && event->attr.mmap_data) ||
5043 (executable && (event->attr.mmap || event->attr.mmap2));
5044}
5045
5046static void perf_event_mmap_output(struct perf_event *event,
5047 void *data)
5048{
5049 struct perf_mmap_event *mmap_event = data;
5050 struct perf_output_handle handle;
5051 struct perf_sample_data sample;
5052 int size = mmap_event->event_id.header.size;
5053 int ret;
5054
5055 if (!perf_event_mmap_match(event, data))
5056 return;
5057
5058 if (event->attr.mmap2) {
5059 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5060 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5061 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5062 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
5063 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
5064 }
5065
5066 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5067 ret = perf_output_begin(&handle, event,
5068 mmap_event->event_id.header.size);
5069 if (ret)
5070 goto out;
5071
5072 mmap_event->event_id.pid = perf_event_pid(event, current);
5073 mmap_event->event_id.tid = perf_event_tid(event, current);
5074
5075 perf_output_put(&handle, mmap_event->event_id);
5076
5077 if (event->attr.mmap2) {
5078 perf_output_put(&handle, mmap_event->maj);
5079 perf_output_put(&handle, mmap_event->min);
5080 perf_output_put(&handle, mmap_event->ino);
5081 perf_output_put(&handle, mmap_event->ino_generation);
5082 }
5083
5084 __output_copy(&handle, mmap_event->file_name,
5085 mmap_event->file_size);
5086
5087 perf_event__output_id_sample(event, &handle, &sample);
5088
5089 perf_output_end(&handle);
5090out:
5091 mmap_event->event_id.header.size = size;
5092}
5093
5094static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5095{
5096 struct vm_area_struct *vma = mmap_event->vma;
5097 struct file *file = vma->vm_file;
5098 int maj = 0, min = 0;
5099 u64 ino = 0, gen = 0;
5100 unsigned int size;
5101 char tmp[16];
5102 char *buf = NULL;
5103 const char *name;
5104
5105 memset(tmp, 0, sizeof(tmp));
5106
5107 if (file) {
5108 struct inode *inode;
5109 dev_t dev;
5110
5111 /*
5112 * d_path() fills the buffer from the end backwards, so leave room for
5113 * the zero padding needed for the 64-bit alignment done later.
5114 */
5115 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
5116 if (!buf) {
5117 name = strncpy(tmp, "//enomem", sizeof(tmp));
5118 goto got_name;
5119 }
5120 name = d_path(&file->f_path, buf, PATH_MAX);
5121 if (IS_ERR(name)) {
5122 name = strncpy(tmp, "//toolong", sizeof(tmp));
5123 goto got_name;
5124 }
5125 inode = file_inode(vma->vm_file);
5126 dev = inode->i_sb->s_dev;
5127 ino = inode->i_ino;
5128 gen = inode->i_generation;
5129 maj = MAJOR(dev);
5130 min = MINOR(dev);
5131
5132 } else {
5133 if (arch_vma_name(mmap_event->vma)) {
5134 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
5135 sizeof(tmp) - 1);
5136 tmp[sizeof(tmp) - 1] = '\0';
5137 goto got_name;
5138 }
5139
5140 if (!vma->vm_mm) {
5141 name = strncpy(tmp, "[vdso]", sizeof(tmp));
5142 goto got_name;
5143 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
5144 vma->vm_end >= vma->vm_mm->brk) {
5145 name = strncpy(tmp, "[heap]", sizeof(tmp));
5146 goto got_name;
5147 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
5148 vma->vm_end >= vma->vm_mm->start_stack) {
5149 name = strncpy(tmp, "[stack]", sizeof(tmp));
5150 goto got_name;
5151 }
5152
5153 name = strncpy(tmp, "//anon", sizeof(tmp));
5154 goto got_name;
5155 }
5156
5157got_name:
5158 size = ALIGN(strlen(name)+1, sizeof(u64));
5159
5160 mmap_event->file_name = name;
5161 mmap_event->file_size = size;
5162 mmap_event->maj = maj;
5163 mmap_event->min = min;
5164 mmap_event->ino = ino;
5165 mmap_event->ino_generation = gen;
5166
5167 if (!(vma->vm_flags & VM_EXEC))
5168 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5169
5170 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5171
5172 perf_event_aux(perf_event_mmap_output,
5173 mmap_event,
5174 NULL);
5175
5176 kfree(buf);
5177}
5178
5179void perf_event_mmap(struct vm_area_struct *vma)
5180{
5181 struct perf_mmap_event mmap_event;
5182
5183 if (!atomic_read(&nr_mmap_events))
5184 return;
5185
5186 mmap_event = (struct perf_mmap_event){
5187 .vma = vma,
5188
5189
5190 .event_id = {
5191 .header = {
5192 .type = PERF_RECORD_MMAP,
5193 .misc = PERF_RECORD_MISC_USER,
5194
5195 },
5196
5197
5198 .start = vma->vm_start,
5199 .len = vma->vm_end - vma->vm_start,
5200 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
5201 },
5202
5203
5204
5205
5206 };
5207
5208 perf_event_mmap_event(&mmap_event);
5209}
5210
5211/*
5212 * IRQ throttle logging
5213 */
5214
5215static void perf_log_throttle(struct perf_event *event, int enable)
5216{
5217 struct perf_output_handle handle;
5218 struct perf_sample_data sample;
5219 int ret;
5220
5221 struct {
5222 struct perf_event_header header;
5223 u64 time;
5224 u64 id;
5225 u64 stream_id;
5226 } throttle_event = {
5227 .header = {
5228 .type = PERF_RECORD_THROTTLE,
5229 .misc = 0,
5230 .size = sizeof(throttle_event),
5231 },
5232 .time = perf_clock(),
5233 .id = primary_event_id(event),
5234 .stream_id = event->id,
5235 };
5236
5237 if (enable)
5238 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
5239
5240 perf_event_header__init_id(&throttle_event.header, &sample, event);
5241
5242 ret = perf_output_begin(&handle, event,
5243 throttle_event.header.size);
5244 if (ret)
5245 return;
5246
5247 perf_output_put(&handle, throttle_event);
5248 perf_event__output_id_sample(event, &handle, &sample);
5249 perf_output_end(&handle);
5250}
5251
5252/*
5253 * Generic event overflow handling, sampling.
5254 */
5255
5256static int __perf_event_overflow(struct perf_event *event,
5257 int throttle, struct perf_sample_data *data,
5258 struct pt_regs *regs)
5259{
5260 int events = atomic_read(&event->event_limit);
5261 struct hw_perf_event *hwc = &event->hw;
5262 u64 seq;
5263 int ret = 0;
5264
5265
5266
5267
5268
5269 if (unlikely(!is_sampling_event(event)))
5270 return 0;
5271
5272 seq = __this_cpu_read(perf_throttled_seq);
5273 if (seq != hwc->interrupts_seq) {
5274 hwc->interrupts_seq = seq;
5275 hwc->interrupts = 1;
5276 } else {
5277 hwc->interrupts++;
5278 if (unlikely(throttle
5279 && hwc->interrupts >= max_samples_per_tick)) {
5280 __this_cpu_inc(perf_throttled_count);
5281 hwc->interrupts = MAX_INTERRUPTS;
5282 perf_log_throttle(event, 0);
5283 tick_nohz_full_kick();
5284 ret = 1;
5285 }
5286 }
5287
5288 if (event->attr.freq) {
5289 u64 now = perf_clock();
5290 s64 delta = now - hwc->freq_time_stamp;
5291
5292 hwc->freq_time_stamp = now;
5293
5294 if (delta > 0 && delta < 2*TICK_NSEC)
5295 perf_adjust_period(event, delta, hwc->last_period, true);
5296 }
5297
5298
5299
5300
5301
5302
5303 event->pending_kill = POLL_IN;
5304 if (events && atomic_dec_and_test(&event->event_limit)) {
5305 ret = 1;
5306 event->pending_kill = POLL_HUP;
5307 event->pending_disable = 1;
5308 irq_work_queue(&event->pending);
5309 }
5310
5311 if (event->overflow_handler)
5312 event->overflow_handler(event, data, regs);
5313 else
5314 perf_event_output(event, data, regs);
5315
5316 if (event->fasync && event->pending_kill) {
5317 event->pending_wakeup = 1;
5318 irq_work_queue(&event->pending);
5319 }
5320
5321 return ret;
5322}
5323
5324int perf_event_overflow(struct perf_event *event,
5325 struct perf_sample_data *data,
5326 struct pt_regs *regs)
5327{
5328 return __perf_event_overflow(event, 1, data, regs);
5329}
5330
5331
5332
5333
5334
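/*
 * Generic software event infrastructure.  Active software events live in
 * a per-cpu hash table keyed on (type, event_id) so that sample delivery
 * in do_perf_sw_event() only walks the matching bucket.
 */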
5335struct swevent_htable {
5336 struct swevent_hlist *swevent_hlist;
5337 struct mutex hlist_mutex;
5338 int hlist_refcount;
5339
5340
5341 int recursion[PERF_NR_CONTEXTS];
5342};
5343
5344static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5345
5346
5347
5348
5349
5350
5351
5352
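/*
 * Compute how many whole sample periods have elapsed and pull
 * hwc->period_left back by that amount.  The cmpxchg loop keeps the
 * update safe against concurrent increments from interrupt context.
 */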
5353u64 perf_swevent_set_period(struct perf_event *event)
5354{
5355 struct hw_perf_event *hwc = &event->hw;
5356 u64 period = hwc->last_period;
5357 u64 nr, offset;
5358 s64 old, val;
5359
5360 hwc->last_period = hwc->sample_period;
5361
5362again:
5363 old = val = local64_read(&hwc->period_left);
5364 if (val < 0)
5365 return 0;
5366
5367 nr = div64_u64(period + val, period);
5368 offset = nr * period;
5369 val -= offset;
5370 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5371 goto again;
5372
5373 return nr;
5374}
5375
5376static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
5377 struct perf_sample_data *data,
5378 struct pt_regs *regs)
5379{
5380 struct hw_perf_event *hwc = &event->hw;
5381 int throttle = 0;
5382
5383 if (!overflow)
5384 overflow = perf_swevent_set_period(event);
5385
5386 if (hwc->interrupts == MAX_INTERRUPTS)
5387 return;
5388
5389 for (; overflow; overflow--) {
5390 if (__perf_event_overflow(event, throttle,
5391 data, regs)) {
5392
5393
5394
5395
5396 break;
5397 }
5398 throttle = 1;
5399 }
5400}
5401
5402static void perf_swevent_event(struct perf_event *event, u64 nr,
5403 struct perf_sample_data *data,
5404 struct pt_regs *regs)
5405{
5406 struct hw_perf_event *hwc = &event->hw;
5407
5408 local64_add(nr, &event->count);
5409
5410 if (!regs)
5411 return;
5412
5413 if (!is_sampling_event(event))
5414 return;
5415
5416 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5417 data->period = nr;
5418 return perf_swevent_overflow(event, 1, data, regs);
5419 } else
5420 data->period = event->hw.last_period;
5421
5422 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5423 return perf_swevent_overflow(event, 1, data, regs);
5424
5425 if (local64_add_negative(nr, &hwc->period_left))
5426 return;
5427
5428 perf_swevent_overflow(event, 0, data, regs);
5429}
5430
5431static int perf_exclude_event(struct perf_event *event,
5432 struct pt_regs *regs)
5433{
5434 if (event->hw.state & PERF_HES_STOPPED)
5435 return 1;
5436
5437 if (regs) {
5438 if (event->attr.exclude_user && user_mode(regs))
5439 return 1;
5440
5441 if (event->attr.exclude_kernel && !user_mode(regs))
5442 return 1;
5443 }
5444
5445 return 0;
5446}
5447
5448static int perf_swevent_match(struct perf_event *event,
5449 enum perf_type_id type,
5450 u32 event_id,
5451 struct perf_sample_data *data,
5452 struct pt_regs *regs)
5453{
5454 if (event->attr.type != type)
5455 return 0;
5456
5457 if (event->attr.config != event_id)
5458 return 0;
5459
5460 if (perf_exclude_event(event, regs))
5461 return 0;
5462
5463 return 1;
5464}
5465
5466static inline u64 swevent_hash(u64 type, u32 event_id)
5467{
5468 u64 val = event_id | (type << 32);
5469
5470 return hash_64(val, SWEVENT_HLIST_BITS);
5471}
5472
5473static inline struct hlist_head *
5474__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5475{
5476 u64 hash = swevent_hash(type, event_id);
5477
5478 return &hlist->heads[hash];
5479}
5480
5481
5482static inline struct hlist_head *
5483find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5484{
5485 struct swevent_hlist *hlist;
5486
5487 hlist = rcu_dereference(swhash->swevent_hlist);
5488 if (!hlist)
5489 return NULL;
5490
5491 return __find_swevent_head(hlist, type, event_id);
5492}
5493
5494
5495static inline struct hlist_head *
5496find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5497{
5498 struct swevent_hlist *hlist;
5499 u32 event_id = event->attr.config;
5500 u64 type = event->attr.type;
5501
5502
5503
5504
5505
5506
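 /*
  * The hlist cannot go away while this event is being scheduled because
  * the event's ctx->lock is held here, as the lockdep annotation below
  * documents.
  */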
5507 hlist = rcu_dereference_protected(swhash->swevent_hlist,
5508 lockdep_is_held(&event->ctx->lock));
5509 if (!hlist)
5510 return NULL;
5511
5512 return __find_swevent_head(hlist, type, event_id);
5513}
5514
5515static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5516 u64 nr,
5517 struct perf_sample_data *data,
5518 struct pt_regs *regs)
5519{
5520 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5521 struct perf_event *event;
5522 struct hlist_head *head;
5523
5524 rcu_read_lock();
5525 head = find_swevent_head_rcu(swhash, type, event_id);
5526 if (!head)
5527 goto end;
5528
5529 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5530 if (perf_swevent_match(event, type, event_id, data, regs))
5531 perf_swevent_event(event, nr, data, regs);
5532 }
5533end:
5534 rcu_read_unlock();
5535}
5536
5537int perf_swevent_get_recursion_context(void)
5538{
5539 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5540
5541 return get_recursion_context(swhash->recursion);
5542}
5543EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5544
5545inline void perf_swevent_put_recursion_context(int rctx)
5546{
5547 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5548
5549 put_recursion_context(swhash->recursion, rctx);
5550}
5551
5552void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5553{
5554 struct perf_sample_data data;
5555 int rctx;
5556
5557 preempt_disable_notrace();
5558 rctx = perf_swevent_get_recursion_context();
5559 if (rctx < 0)
5560 return;
5561
5562 perf_sample_data_init(&data, addr, 0);
5563
5564 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5565
5566 perf_swevent_put_recursion_context(rctx);
5567 preempt_enable_notrace();
5568}
5569
5570static void perf_swevent_read(struct perf_event *event)
5571{
5572}
5573
5574static int perf_swevent_add(struct perf_event *event, int flags)
5575{
5576 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5577 struct hw_perf_event *hwc = &event->hw;
5578 struct hlist_head *head;
5579
5580 if (is_sampling_event(event)) {
5581 hwc->last_period = hwc->sample_period;
5582 perf_swevent_set_period(event);
5583 }
5584
5585 hwc->state = !(flags & PERF_EF_START);
5586
5587 head = find_swevent_head(swhash, event);
5588 if (WARN_ON_ONCE(!head))
5589 return -EINVAL;
5590
5591 hlist_add_head_rcu(&event->hlist_entry, head);
5592
5593 return 0;
5594}
5595
5596static void perf_swevent_del(struct perf_event *event, int flags)
5597{
5598 hlist_del_rcu(&event->hlist_entry);
5599}
5600
5601static void perf_swevent_start(struct perf_event *event, int flags)
5602{
5603 event->hw.state = 0;
5604}
5605
5606static void perf_swevent_stop(struct perf_event *event, int flags)
5607{
5608 event->hw.state = PERF_HES_STOPPED;
5609}
5610
5611
5612static inline struct swevent_hlist *
5613swevent_hlist_deref(struct swevent_htable *swhash)
5614{
5615 return rcu_dereference_protected(swhash->swevent_hlist,
5616 lockdep_is_held(&swhash->hlist_mutex));
5617}
5618
5619static void swevent_hlist_release(struct swevent_htable *swhash)
5620{
5621 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5622
5623 if (!hlist)
5624 return;
5625
5626 rcu_assign_pointer(swhash->swevent_hlist, NULL);
5627 kfree_rcu(hlist, rcu_head);
5628}
5629
5630static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5631{
5632 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5633
5634 mutex_lock(&swhash->hlist_mutex);
5635
5636 if (!--swhash->hlist_refcount)
5637 swevent_hlist_release(swhash);
5638
5639 mutex_unlock(&swhash->hlist_mutex);
5640}
5641
5642static void swevent_hlist_put(struct perf_event *event)
5643{
5644 int cpu;
5645
5646 if (event->cpu != -1) {
5647 swevent_hlist_put_cpu(event, event->cpu);
5648 return;
5649 }
5650
5651 for_each_possible_cpu(cpu)
5652 swevent_hlist_put_cpu(event, cpu);
5653}
5654
5655static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5656{
5657 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5658 int err = 0;
5659
5660 mutex_lock(&swhash->hlist_mutex);
5661
5662 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5663 struct swevent_hlist *hlist;
5664
5665 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5666 if (!hlist) {
5667 err = -ENOMEM;
5668 goto exit;
5669 }
5670 rcu_assign_pointer(swhash->swevent_hlist, hlist);
5671 }
5672 swhash->hlist_refcount++;
5673exit:
5674 mutex_unlock(&swhash->hlist_mutex);
5675
5676 return err;
5677}
5678
5679static int swevent_hlist_get(struct perf_event *event)
5680{
5681 int err;
5682 int cpu, failed_cpu;
5683
5684 if (event->cpu != -1)
5685 return swevent_hlist_get_cpu(event, event->cpu);
5686
5687 get_online_cpus();
5688 for_each_possible_cpu(cpu) {
5689 err = swevent_hlist_get_cpu(event, cpu);
5690 if (err) {
5691 failed_cpu = cpu;
5692 goto fail;
5693 }
5694 }
5695 put_online_cpus();
5696
5697 return 0;
5698fail:
5699 for_each_possible_cpu(cpu) {
5700 if (cpu == failed_cpu)
5701 break;
5702 swevent_hlist_put_cpu(event, cpu);
5703 }
5704
5705 put_online_cpus();
5706 return err;
5707}
5708
5709struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5710
5711static void sw_perf_event_destroy(struct perf_event *event)
5712{
5713 u64 event_id = event->attr.config;
5714
5715 WARN_ON(event->parent);
5716
5717 static_key_slow_dec(&perf_swevent_enabled[event_id]);
5718 swevent_hlist_put(event);
5719}
5720
5721static int perf_swevent_init(struct perf_event *event)
5722{
5723 u64 event_id = event->attr.config;
5724
5725 if (event->attr.type != PERF_TYPE_SOFTWARE)
5726 return -ENOENT;
5727
5728
5729
5730
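 /* no branch sampling for software events */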
5731 if (has_branch_stack(event))
5732 return -EOPNOTSUPP;
5733
5734 switch (event_id) {
5735 case PERF_COUNT_SW_CPU_CLOCK:
5736 case PERF_COUNT_SW_TASK_CLOCK:
5737 return -ENOENT;
5738
5739 default:
5740 break;
5741 }
5742
5743 if (event_id >= PERF_COUNT_SW_MAX)
5744 return -ENOENT;
5745
5746 if (!event->parent) {
5747 int err;
5748
5749 err = swevent_hlist_get(event);
5750 if (err)
5751 return err;
5752
5753 static_key_slow_inc(&perf_swevent_enabled[event_id]);
5754 event->destroy = sw_perf_event_destroy;
5755 }
5756
5757 return 0;
5758}
5759
5760static int perf_swevent_event_idx(struct perf_event *event)
5761{
5762 return 0;
5763}
5764
5765static struct pmu perf_swevent = {
5766 .task_ctx_nr = perf_sw_context,
5767
5768 .event_init = perf_swevent_init,
5769 .add = perf_swevent_add,
5770 .del = perf_swevent_del,
5771 .start = perf_swevent_start,
5772 .stop = perf_swevent_stop,
5773 .read = perf_swevent_read,
5774
5775 .event_idx = perf_swevent_event_idx,
5776};
5777
5778#ifdef CONFIG_EVENT_TRACING
5779
5780static int perf_tp_filter_match(struct perf_event *event,
5781 struct perf_sample_data *data)
5782{
5783 void *record = data->raw->data;
5784
5785 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5786 return 1;
5787 return 0;
5788}
5789
5790static int perf_tp_event_match(struct perf_event *event,
5791 struct perf_sample_data *data,
5792 struct pt_regs *regs)
5793{
5794 if (event->hw.state & PERF_HES_STOPPED)
5795 return 0;
5796
5797
5798
5799 if (event->attr.exclude_kernel)
5800 return 0;
5801
5802 if (!perf_tp_filter_match(event, data))
5803 return 0;
5804
5805 return 1;
5806}
5807
5808void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5809 struct pt_regs *regs, struct hlist_head *head, int rctx,
5810 struct task_struct *task)
5811{
5812 struct perf_sample_data data;
5813 struct perf_event *event;
5814
5815 struct perf_raw_record raw = {
5816 .size = entry_size,
5817 .data = record,
5818 };
5819
5820 perf_sample_data_init(&data, addr, 0);
5821 data.raw = &raw;
5822
5823 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5824 if (perf_tp_event_match(event, &data, regs))
5825 perf_swevent_event(event, count, &data, regs);
5826 }
5827
5828
5829
5830
5831
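 /*
  * If a target task was supplied and differs from current, also walk
  * that task's software context and deliver the sample to any matching
  * tracepoint events found there.
  */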
5832 if (task && task != current) {
5833 struct perf_event_context *ctx;
5834 struct trace_entry *entry = record;
5835
5836 rcu_read_lock();
5837 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5838 if (!ctx)
5839 goto unlock;
5840
5841 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5842 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5843 continue;
5844 if (event->attr.config != entry->type)
5845 continue;
5846 if (perf_tp_event_match(event, &data, regs))
5847 perf_swevent_event(event, count, &data, regs);
5848 }
5849unlock:
5850 rcu_read_unlock();
5851 }
5852
5853 perf_swevent_put_recursion_context(rctx);
5854}
5855EXPORT_SYMBOL_GPL(perf_tp_event);
5856
5857static void tp_perf_event_destroy(struct perf_event *event)
5858{
5859 perf_trace_destroy(event);
5860}
5861
5862static int perf_tp_event_init(struct perf_event *event)
5863{
5864 int err;
5865
5866 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5867 return -ENOENT;
5868
5869
5870
5871
5872 if (has_branch_stack(event))
5873 return -EOPNOTSUPP;
5874
5875 err = perf_trace_init(event);
5876 if (err)
5877 return err;
5878
5879 event->destroy = tp_perf_event_destroy;
5880
5881 return 0;
5882}
5883
5884static struct pmu perf_tracepoint = {
5885 .task_ctx_nr = perf_sw_context,
5886
5887 .event_init = perf_tp_event_init,
5888 .add = perf_trace_add,
5889 .del = perf_trace_del,
5890 .start = perf_swevent_start,
5891 .stop = perf_swevent_stop,
5892 .read = perf_swevent_read,
5893
5894 .event_idx = perf_swevent_event_idx,
5895};
5896
5897static inline void perf_tp_register(void)
5898{
5899 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5900}
5901
5902static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5903{
5904 char *filter_str;
5905 int ret;
5906
5907 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5908 return -EINVAL;
5909
5910 filter_str = strndup_user(arg, PAGE_SIZE);
5911 if (IS_ERR(filter_str))
5912 return PTR_ERR(filter_str);
5913
5914 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5915
5916 kfree(filter_str);
5917 return ret;
5918}
5919
5920static void perf_event_free_filter(struct perf_event *event)
5921{
5922 ftrace_profile_free_filter(event);
5923}
5924
5925#else
5926
5927static inline void perf_tp_register(void)
5928{
5929}
5930
5931static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5932{
5933 return -ENOENT;
5934}
5935
5936static void perf_event_free_filter(struct perf_event *event)
5937{
5938}
5939
5940#endif
5941
5942#ifdef CONFIG_HAVE_HW_BREAKPOINT
5943void perf_bp_event(struct perf_event *bp, void *data)
5944{
5945 struct perf_sample_data sample;
5946 struct pt_regs *regs = data;
5947
5948 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5949
5950 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5951 perf_swevent_event(bp, 1, &sample, regs);
5952}
5953#endif
5954
5955
5956
5957
5958
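/*
 * hrtimer based swevent callback, used by the cpu-clock and task-clock
 * software events to emulate sampling on top of a high resolution timer.
 */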
5959static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5960{
5961 enum hrtimer_restart ret = HRTIMER_RESTART;
5962 struct perf_sample_data data;
5963 struct pt_regs *regs;
5964 struct perf_event *event;
5965 u64 period;
5966
5967 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5968
5969 if (event->state != PERF_EVENT_STATE_ACTIVE)
5970 return HRTIMER_NORESTART;
5971
5972 event->pmu->read(event);
5973
5974 perf_sample_data_init(&data, 0, event->hw.last_period);
5975 regs = get_irq_regs();
5976
5977 if (regs && !perf_exclude_event(event, regs)) {
5978 if (!(event->attr.exclude_idle && is_idle_task(current)))
5979 if (__perf_event_overflow(event, 1, &data, regs))
5980 ret = HRTIMER_NORESTART;
5981 }
5982
5983 period = max_t(u64, 10000, event->hw.sample_period);
5984 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5985
5986 return ret;
5987}
5988
5989static void perf_swevent_start_hrtimer(struct perf_event *event)
5990{
5991 struct hw_perf_event *hwc = &event->hw;
5992 s64 period;
5993
5994 if (!is_sampling_event(event))
5995 return;
5996
5997 period = local64_read(&hwc->period_left);
5998 if (period) {
5999 if (period < 0)
6000 period = 10000;
6001
6002 local64_set(&hwc->period_left, 0);
6003 } else {
6004 period = max_t(u64, 10000, hwc->sample_period);
6005 }
6006 __hrtimer_start_range_ns(&hwc->hrtimer,
6007 ns_to_ktime(period), 0,
6008 HRTIMER_MODE_REL_PINNED, 0);
6009}
6010
6011static void perf_swevent_cancel_hrtimer(struct perf_event *event)
6012{
6013 struct hw_perf_event *hwc = &event->hw;
6014
6015 if (is_sampling_event(event)) {
6016 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
6017 local64_set(&hwc->period_left, ktime_to_ns(remaining));
6018
6019 hrtimer_cancel(&hwc->hrtimer);
6020 }
6021}
6022
6023static void perf_swevent_init_hrtimer(struct perf_event *event)
6024{
6025 struct hw_perf_event *hwc = &event->hw;
6026
6027 if (!is_sampling_event(event))
6028 return;
6029
6030 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6031 hwc->hrtimer.function = perf_swevent_hrtimer;
6032
6033
6034
6035
6036
6037 if (event->attr.freq) {
6038 long freq = event->attr.sample_freq;
6039
6040 event->attr.sample_period = NSEC_PER_SEC / freq;
6041 hwc->sample_period = event->attr.sample_period;
6042 local64_set(&hwc->period_left, hwc->sample_period);
6043 hwc->last_period = hwc->sample_period;
6044 event->attr.freq = 0;
6045 }
6046}
6047
6048
6049
6050
6051
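/*
 * Software event: cpu wall time clock
 */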
6052static void cpu_clock_event_update(struct perf_event *event)
6053{
6054 s64 prev;
6055 u64 now;
6056
6057 now = local_clock();
6058 prev = local64_xchg(&event->hw.prev_count, now);
6059 local64_add(now - prev, &event->count);
6060}
6061
6062static void cpu_clock_event_start(struct perf_event *event, int flags)
6063{
6064 local64_set(&event->hw.prev_count, local_clock());
6065 perf_swevent_start_hrtimer(event);
6066}
6067
6068static void cpu_clock_event_stop(struct perf_event *event, int flags)
6069{
6070 perf_swevent_cancel_hrtimer(event);
6071 cpu_clock_event_update(event);
6072}
6073
6074static int cpu_clock_event_add(struct perf_event *event, int flags)
6075{
6076 if (flags & PERF_EF_START)
6077 cpu_clock_event_start(event, flags);
6078
6079 return 0;
6080}
6081
6082static void cpu_clock_event_del(struct perf_event *event, int flags)
6083{
6084 cpu_clock_event_stop(event, flags);
6085}
6086
6087static void cpu_clock_event_read(struct perf_event *event)
6088{
6089 cpu_clock_event_update(event);
6090}
6091
6092static int cpu_clock_event_init(struct perf_event *event)
6093{
6094 if (event->attr.type != PERF_TYPE_SOFTWARE)
6095 return -ENOENT;
6096
6097 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6098 return -ENOENT;
6099
6100
6101
6102
6103 if (has_branch_stack(event))
6104 return -EOPNOTSUPP;
6105
6106 perf_swevent_init_hrtimer(event);
6107
6108 return 0;
6109}
6110
6111static struct pmu perf_cpu_clock = {
6112 .task_ctx_nr = perf_sw_context,
6113
6114 .event_init = cpu_clock_event_init,
6115 .add = cpu_clock_event_add,
6116 .del = cpu_clock_event_del,
6117 .start = cpu_clock_event_start,
6118 .stop = cpu_clock_event_stop,
6119 .read = cpu_clock_event_read,
6120
6121 .event_idx = perf_swevent_event_idx,
6122};
6123
6124
6125
6126
6127
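/*
 * Software event: task time clock
 */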
6128static void task_clock_event_update(struct perf_event *event, u64 now)
6129{
6130 u64 prev;
6131 s64 delta;
6132
6133 prev = local64_xchg(&event->hw.prev_count, now);
6134 delta = now - prev;
6135 local64_add(delta, &event->count);
6136}
6137
6138static void task_clock_event_start(struct perf_event *event, int flags)
6139{
6140 local64_set(&event->hw.prev_count, event->ctx->time);
6141 perf_swevent_start_hrtimer(event);
6142}
6143
6144static void task_clock_event_stop(struct perf_event *event, int flags)
6145{
6146 perf_swevent_cancel_hrtimer(event);
6147 task_clock_event_update(event, event->ctx->time);
6148}
6149
6150static int task_clock_event_add(struct perf_event *event, int flags)
6151{
6152 if (flags & PERF_EF_START)
6153 task_clock_event_start(event, flags);
6154
6155 return 0;
6156}
6157
6158static void task_clock_event_del(struct perf_event *event, int flags)
6159{
6160 task_clock_event_stop(event, PERF_EF_UPDATE);
6161}
6162
6163static void task_clock_event_read(struct perf_event *event)
6164{
6165 u64 now = perf_clock();
6166 u64 delta = now - event->ctx->timestamp;
6167 u64 time = event->ctx->time + delta;
6168
6169 task_clock_event_update(event, time);
6170}
6171
6172static int task_clock_event_init(struct perf_event *event)
6173{
6174 if (event->attr.type != PERF_TYPE_SOFTWARE)
6175 return -ENOENT;
6176
6177 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6178 return -ENOENT;
6179
6180
6181
6182
6183 if (has_branch_stack(event))
6184 return -EOPNOTSUPP;
6185
6186 perf_swevent_init_hrtimer(event);
6187
6188 return 0;
6189}
6190
6191static struct pmu perf_task_clock = {
6192 .task_ctx_nr = perf_sw_context,
6193
6194 .event_init = task_clock_event_init,
6195 .add = task_clock_event_add,
6196 .del = task_clock_event_del,
6197 .start = task_clock_event_start,
6198 .stop = task_clock_event_stop,
6199 .read = task_clock_event_read,
6200
6201 .event_idx = perf_swevent_event_idx,
6202};
6203
6204static void perf_pmu_nop_void(struct pmu *pmu)
6205{
6206}
6207
6208static int perf_pmu_nop_int(struct pmu *pmu)
6209{
6210 return 0;
6211}
6212
6213static void perf_pmu_start_txn(struct pmu *pmu)
6214{
6215 perf_pmu_disable(pmu);
6216}
6217
6218static int perf_pmu_commit_txn(struct pmu *pmu)
6219{
6220 perf_pmu_enable(pmu);
6221 return 0;
6222}
6223
6224static void perf_pmu_cancel_txn(struct pmu *pmu)
6225{
6226 perf_pmu_enable(pmu);
6227}
6228
6229static int perf_event_idx_default(struct perf_event *event)
6230{
6231 return event->hw.idx + 1;
6232}
6233
6234
6235
6236
6237
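/*
 * PMUs that share a task context number also share the per-cpu context:
 * return an already allocated pmu_cpu_context for the given task_ctx_nr,
 * if any.
 */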
6238static void *find_pmu_context(int ctxn)
6239{
6240 struct pmu *pmu;
6241
6242 if (ctxn < 0)
6243 return NULL;
6244
6245 list_for_each_entry(pmu, &pmus, entry) {
6246 if (pmu->task_ctx_nr == ctxn)
6247 return pmu->pmu_cpu_context;
6248 }
6249
6250 return NULL;
6251}
6252
6253static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
6254{
6255 int cpu;
6256
6257 for_each_possible_cpu(cpu) {
6258 struct perf_cpu_context *cpuctx;
6259
6260 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6261
6262 if (cpuctx->unique_pmu == old_pmu)
6263 cpuctx->unique_pmu = pmu;
6264 }
6265}
6266
6267static void free_pmu_context(struct pmu *pmu)
6268{
6269 struct pmu *i;
6270
6271 mutex_lock(&pmus_lock);
6272
6273
6274
6275 list_for_each_entry(i, &pmus, entry) {
6276 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6277 update_pmu_context(i, pmu);
6278 goto out;
6279 }
6280 }
6281
6282 free_percpu(pmu->pmu_cpu_context);
6283out:
6284 mutex_unlock(&pmus_lock);
6285}
6286static struct idr pmu_idr;
6287
6288static ssize_t
6289type_show(struct device *dev, struct device_attribute *attr, char *page)
6290{
6291 struct pmu *pmu = dev_get_drvdata(dev);
6292
6293 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6294}
6295
6296static ssize_t
6297perf_event_mux_interval_ms_show(struct device *dev,
6298 struct device_attribute *attr,
6299 char *page)
6300{
6301 struct pmu *pmu = dev_get_drvdata(dev);
6302
6303 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6304}
6305
6306static ssize_t
6307perf_event_mux_interval_ms_store(struct device *dev,
6308 struct device_attribute *attr,
6309 const char *buf, size_t count)
6310{
6311 struct pmu *pmu = dev_get_drvdata(dev);
6312 int timer, cpu, ret;
6313
6314 ret = kstrtoint(buf, 0, &timer);
6315 if (ret)
6316 return ret;
6317
6318 if (timer < 1)
6319 return -EINVAL;
6320
6321
6322 if (timer == pmu->hrtimer_interval_ms)
6323 return count;
6324
6325 pmu->hrtimer_interval_ms = timer;
6326
6327
6328 for_each_possible_cpu(cpu) {
6329 struct perf_cpu_context *cpuctx;
6330 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6331 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6332
6333 if (hrtimer_active(&cpuctx->hrtimer))
6334 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6335 }
6336
6337 return count;
6338}
6339
6340static struct device_attribute pmu_dev_attrs[] = {
6341 __ATTR_RO(type),
6342 __ATTR_RW(perf_event_mux_interval_ms),
6343 __ATTR_NULL,
6344};
6345
6346static int pmu_bus_running;
6347static struct bus_type pmu_bus = {
6348 .name = "event_source",
6349 .dev_attrs = pmu_dev_attrs,
6350};
6351
6352static void pmu_dev_release(struct device *dev)
6353{
6354 kfree(dev);
6355}
6356
6357static int pmu_dev_alloc(struct pmu *pmu)
6358{
6359 int ret = -ENOMEM;
6360
6361 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6362 if (!pmu->dev)
6363 goto out;
6364
6365 pmu->dev->groups = pmu->attr_groups;
6366 device_initialize(pmu->dev);
6367 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6368 if (ret)
6369 goto free_dev;
6370
6371 dev_set_drvdata(pmu->dev, pmu);
6372 pmu->dev->bus = &pmu_bus;
6373 pmu->dev->release = pmu_dev_release;
6374 ret = device_add(pmu->dev);
6375 if (ret)
6376 goto free_dev;
6377
6378out:
6379 return ret;
6380
6381free_dev:
6382 put_device(pmu->dev);
6383 goto out;
6384}
6385
6386static struct lock_class_key cpuctx_mutex;
6387static struct lock_class_key cpuctx_lock;
6388
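/*
 * perf_pmu_register - register a PMU with the core
 *
 * Allocates the per-cpu state, assigns a dynamic type via the pmu idr
 * when @type is negative, and fills in default/nop callbacks for anything
 * the PMU left NULL.
 */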
6389int perf_pmu_register(struct pmu *pmu, const char *name, int type)
6390{
6391 int cpu, ret;
6392
6393 mutex_lock(&pmus_lock);
6394 ret = -ENOMEM;
6395 pmu->pmu_disable_count = alloc_percpu(int);
6396 if (!pmu->pmu_disable_count)
6397 goto unlock;
6398
6399 pmu->type = -1;
6400 if (!name)
6401 goto skip_type;
6402 pmu->name = name;
6403
6404 if (type < 0) {
6405 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6406 if (type < 0) {
6407 ret = type;
6408 goto free_pdc;
6409 }
6410 }
6411 pmu->type = type;
6412
6413 if (pmu_bus_running) {
6414 ret = pmu_dev_alloc(pmu);
6415 if (ret)
6416 goto free_idr;
6417 }
6418
6419skip_type:
6420 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6421 if (pmu->pmu_cpu_context)
6422 goto got_cpu_context;
6423
6424 ret = -ENOMEM;
6425 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6426 if (!pmu->pmu_cpu_context)
6427 goto free_dev;
6428
6429 for_each_possible_cpu(cpu) {
6430 struct perf_cpu_context *cpuctx;
6431
6432 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6433 __perf_event_init_context(&cpuctx->ctx);
6434 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6435 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6436 cpuctx->ctx.type = cpu_context;
6437 cpuctx->ctx.pmu = pmu;
6438
6439 __perf_cpu_hrtimer_init(cpuctx, cpu);
6440
6441 INIT_LIST_HEAD(&cpuctx->rotation_list);
6442 cpuctx->unique_pmu = pmu;
6443 }
6444
6445got_cpu_context:
6446 if (!pmu->start_txn) {
6447 if (pmu->pmu_enable) {
6448
6449
6450
6451
6452
6453 pmu->start_txn = perf_pmu_start_txn;
6454 pmu->commit_txn = perf_pmu_commit_txn;
6455 pmu->cancel_txn = perf_pmu_cancel_txn;
6456 } else {
6457 pmu->start_txn = perf_pmu_nop_void;
6458 pmu->commit_txn = perf_pmu_nop_int;
6459 pmu->cancel_txn = perf_pmu_nop_void;
6460 }
6461 }
6462
6463 if (!pmu->pmu_enable) {
6464 pmu->pmu_enable = perf_pmu_nop_void;
6465 pmu->pmu_disable = perf_pmu_nop_void;
6466 }
6467
6468 if (!pmu->event_idx)
6469 pmu->event_idx = perf_event_idx_default;
6470
6471 list_add_rcu(&pmu->entry, &pmus);
6472 ret = 0;
6473unlock:
6474 mutex_unlock(&pmus_lock);
6475
6476 return ret;
6477
6478free_dev:
6479 device_del(pmu->dev);
6480 put_device(pmu->dev);
6481
6482free_idr:
6483 if (pmu->type >= PERF_TYPE_MAX)
6484 idr_remove(&pmu_idr, pmu->type);
6485
6486free_pdc:
6487 free_percpu(pmu->pmu_disable_count);
6488 goto unlock;
6489}
6490
6491void perf_pmu_unregister(struct pmu *pmu)
6492{
6493 mutex_lock(&pmus_lock);
6494 list_del_rcu(&pmu->entry);
6495 mutex_unlock(&pmus_lock);
6496
6497
6498
6499
6500
6501 synchronize_srcu(&pmus_srcu);
6502 synchronize_rcu();
6503
6504 free_percpu(pmu->pmu_disable_count);
6505 if (pmu->type >= PERF_TYPE_MAX)
6506 idr_remove(&pmu_idr, pmu->type);
6507 device_del(pmu->dev);
6508 put_device(pmu->dev);
6509 free_pmu_context(pmu);
6510}
6511
6512struct pmu *perf_init_event(struct perf_event *event)
6513{
6514 struct pmu *pmu = NULL;
6515 int idx;
6516 int ret;
6517
6518 idx = srcu_read_lock(&pmus_srcu);
6519
6520 rcu_read_lock();
6521 pmu = idr_find(&pmu_idr, event->attr.type);
6522 rcu_read_unlock();
6523 if (pmu) {
6524 event->pmu = pmu;
6525 ret = pmu->event_init(event);
6526 if (ret)
6527 pmu = ERR_PTR(ret);
6528 goto unlock;
6529 }
6530
6531 list_for_each_entry_rcu(pmu, &pmus, entry) {
6532 event->pmu = pmu;
6533 ret = pmu->event_init(event);
6534 if (!ret)
6535 goto unlock;
6536
6537 if (ret != -ENOENT) {
6538 pmu = ERR_PTR(ret);
6539 goto unlock;
6540 }
6541 }
6542 pmu = ERR_PTR(-ENOENT);
6543unlock:
6544 srcu_read_unlock(&pmus_srcu, idx);
6545
6546 return pmu;
6547}
6548
6549static void account_event_cpu(struct perf_event *event, int cpu)
6550{
6551 if (event->parent)
6552 return;
6553
6554 if (has_branch_stack(event)) {
6555 if (!(event->attach_state & PERF_ATTACH_TASK))
6556 atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
6557 }
6558 if (is_cgroup_event(event))
6559 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
6560}
6561
6562static void account_event(struct perf_event *event)
6563{
6564 if (event->parent)
6565 return;
6566
6567 if (event->attach_state & PERF_ATTACH_TASK)
6568 static_key_slow_inc(&perf_sched_events.key);
6569 if (event->attr.mmap || event->attr.mmap_data)
6570 atomic_inc(&nr_mmap_events);
6571 if (event->attr.comm)
6572 atomic_inc(&nr_comm_events);
6573 if (event->attr.task)
6574 atomic_inc(&nr_task_events);
6575 if (event->attr.freq) {
6576 if (atomic_inc_return(&nr_freq_events) == 1)
6577 tick_nohz_full_kick_all();
6578 }
6579 if (has_branch_stack(event))
6580 static_key_slow_inc(&perf_sched_events.key);
6581 if (is_cgroup_event(event))
6582 static_key_slow_inc(&perf_sched_events.key);
6583
6584 account_event_cpu(event, event->cpu);
6585}
6586
6587
6588
6589
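/*
 * Allocate and initialize an event structure.  Called from the
 * perf_event_open() syscall, the in-kernel counter API and when
 * inheriting events into a child task.
 */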
6590static struct perf_event *
6591perf_event_alloc(struct perf_event_attr *attr, int cpu,
6592 struct task_struct *task,
6593 struct perf_event *group_leader,
6594 struct perf_event *parent_event,
6595 perf_overflow_handler_t overflow_handler,
6596 void *context)
6597{
6598 struct pmu *pmu;
6599 struct perf_event *event;
6600 struct hw_perf_event *hwc;
6601 long err = -EINVAL;
6602
6603 if ((unsigned)cpu >= nr_cpu_ids) {
6604 if (!task || cpu != -1)
6605 return ERR_PTR(-EINVAL);
6606 }
6607
6608 event = kzalloc(sizeof(*event), GFP_KERNEL);
6609 if (!event)
6610 return ERR_PTR(-ENOMEM);
6611
6612
6613
6614
6615
6616 if (!group_leader)
6617 group_leader = event;
6618
6619 mutex_init(&event->child_mutex);
6620 INIT_LIST_HEAD(&event->child_list);
6621
6622 INIT_LIST_HEAD(&event->group_entry);
6623 INIT_LIST_HEAD(&event->event_entry);
6624 INIT_LIST_HEAD(&event->sibling_list);
6625 INIT_LIST_HEAD(&event->rb_entry);
6626
6627 init_waitqueue_head(&event->waitq);
6628 init_irq_work(&event->pending, perf_pending_event);
6629
6630 mutex_init(&event->mmap_mutex);
6631
6632 atomic_long_set(&event->refcount, 1);
6633 event->cpu = cpu;
6634 event->attr = *attr;
6635 event->group_leader = group_leader;
6636 event->pmu = NULL;
6637 event->oncpu = -1;
6638
6639 event->parent = parent_event;
6640
6641 event->ns = get_pid_ns(task_active_pid_ns(current));
6642 event->id = atomic64_inc_return(&perf_event_id);
6643
6644 event->state = PERF_EVENT_STATE_INACTIVE;
6645
6646 if (task) {
6647 event->attach_state = PERF_ATTACH_TASK;
6648
6649 if (attr->type == PERF_TYPE_TRACEPOINT)
6650 event->hw.tp_target = task;
6651#ifdef CONFIG_HAVE_HW_BREAKPOINT
6652
6653
6654
6655 else if (attr->type == PERF_TYPE_BREAKPOINT)
6656 event->hw.bp_target = task;
6657#endif
6658 }
6659
6660 if (!overflow_handler && parent_event) {
6661 overflow_handler = parent_event->overflow_handler;
6662 context = parent_event->overflow_handler_context;
6663 }
6664
6665 event->overflow_handler = overflow_handler;
6666 event->overflow_handler_context = context;
6667
6668 perf_event__state_init(event);
6669
6670 pmu = NULL;
6671
6672 hwc = &event->hw;
6673 hwc->sample_period = attr->sample_period;
6674 if (attr->freq && attr->sample_freq)
6675 hwc->sample_period = 1;
6676 hwc->last_period = hwc->sample_period;
6677
6678 local64_set(&hwc->period_left, hwc->sample_period);
6679
6680
6681
6682
6683 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6684 goto err_ns;
6685
6686 pmu = perf_init_event(event);
6687 if (!pmu)
6688 goto err_ns;
6689 else if (IS_ERR(pmu)) {
6690 err = PTR_ERR(pmu);
6691 goto err_ns;
6692 }
6693
6694 if (!event->parent) {
6695 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6696 err = get_callchain_buffers();
6697 if (err)
6698 goto err_pmu;
6699 }
6700 }
6701
6702 return event;
6703
6704err_pmu:
6705 if (event->destroy)
6706 event->destroy(event);
6707err_ns:
6708 if (event->ns)
6709 put_pid_ns(event->ns);
6710 kfree(event);
6711
6712 return ERR_PTR(err);
6713}
6714
6715static int perf_copy_attr(struct perf_event_attr __user *uattr,
6716 struct perf_event_attr *attr)
6717{
6718 u32 size;
6719 int ret;
6720
6721 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6722 return -EFAULT;
6723
6724
6725
6726
6727 memset(attr, 0, sizeof(*attr));
6728
6729 ret = get_user(size, &uattr->size);
6730 if (ret)
6731 return ret;
6732
6733 if (size > PAGE_SIZE)
6734 goto err_size;
6735
6736 if (!size)
6737 size = PERF_ATTR_SIZE_VER0;
6738
6739 if (size < PERF_ATTR_SIZE_VER0)
6740 goto err_size;
6741
6742
6743
6744
6745
6746
6747
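 /*
  * If userspace hands us a larger attr than this kernel knows about,
  * every extra byte must be zero; otherwise bail out with E2BIG and
  * report the size we do understand back through uattr->size.
  */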
6748 if (size > sizeof(*attr)) {
6749 unsigned char __user *addr;
6750 unsigned char __user *end;
6751 unsigned char val;
6752
6753 addr = (void __user *)uattr + sizeof(*attr);
6754 end = (void __user *)uattr + size;
6755
6756 for (; addr < end; addr++) {
6757 ret = get_user(val, addr);
6758 if (ret)
6759 return ret;
6760 if (val)
6761 goto err_size;
6762 }
6763 size = sizeof(*attr);
6764 }
6765
6766 ret = copy_from_user(attr, uattr, size);
6767 if (ret)
6768 return -EFAULT;
6769
6770
6771 if (attr->mmap2)
6772 return -EINVAL;
6773
6774 if (attr->__reserved_1)
6775 return -EINVAL;
6776
6777 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6778 return -EINVAL;
6779
6780 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6781 return -EINVAL;
6782
6783 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6784 u64 mask = attr->branch_sample_type;
6785
6786
6787 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6788 return -EINVAL;
6789
6790
6791 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6792 return -EINVAL;
6793
6794
6795 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6796
6797
6798 if (!attr->exclude_kernel)
6799 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6800
6801 if (!attr->exclude_user)
6802 mask |= PERF_SAMPLE_BRANCH_USER;
6803
6804 if (!attr->exclude_hv)
6805 mask |= PERF_SAMPLE_BRANCH_HV;
6806
6807
6808
6809 attr->branch_sample_type = mask;
6810 }
6811
6812 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6813 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6814 return -EACCES;
6815 }
6816
6817 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6818 ret = perf_reg_validate(attr->sample_regs_user);
6819 if (ret)
6820 return ret;
6821 }
6822
6823 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6824 if (!arch_perf_have_user_stack_dump())
6825 return -ENOSYS;
6826
6827
6828
6829
6830
6831
6832 if (attr->sample_stack_user >= USHRT_MAX)
6833 ret = -EINVAL;
6834 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6835 ret = -EINVAL;
6836 }
6837
6838out:
6839 return ret;
6840
6841err_size:
6842 put_user(sizeof(*attr), &uattr->size);
6843 ret = -E2BIG;
6844 goto out;
6845}
6846
6847static int
6848perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6849{
6850 struct ring_buffer *rb = NULL, *old_rb = NULL;
6851 int ret = -EINVAL;
6852
6853 if (!output_event)
6854 goto set;
6855
6856
6857 if (event == output_event)
6858 goto out;
6859
6860
6861
6862
6863 if (output_event->cpu != event->cpu)
6864 goto out;
6865
6866
6867
6868
6869 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6870 goto out;
6871
6872set:
6873 mutex_lock(&event->mmap_mutex);
6874
6875 if (atomic_read(&event->mmap_count))
6876 goto unlock;
6877
6878 old_rb = event->rb;
6879
6880 if (output_event) {
6881
6882 rb = ring_buffer_get(output_event);
6883 if (!rb)
6884 goto unlock;
6885 }
6886
6887 if (old_rb)
6888 ring_buffer_detach(event, old_rb);
6889
6890 if (rb)
6891 ring_buffer_attach(event, rb);
6892
6893 rcu_assign_pointer(event->rb, rb);
6894
6895 if (old_rb) {
6896 ring_buffer_put(old_rb);
6897
6898
6899
6900
6901
6902 wake_up_all(&event->waitq);
6903 }
6904
6905 ret = 0;
6906unlock:
6907 mutex_unlock(&event->mmap_mutex);
6908
6909out:
6910 return ret;
6911}
6912
6913
6914
6915
6916
6917
6918
6919
6920
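/*
 * sys_perf_event_open - open a performance event, associate it to a task
 * and/or a cpu
 *
 * @attr_uptr: attributes of the event to open
 * @pid:       target pid, or a cgroup fd when PERF_FLAG_PID_CGROUP is set
 * @cpu:       target cpu, -1 for "any cpu" of the target task
 * @group_fd:  fd of the group leader event, -1 to start a new group
 * @flags:     PERF_FLAG_* modifiers
 *
 * A minimal user-space sketch (illustrative only), counting cycles for
 * the calling task on any cpu:
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.size   = sizeof(attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */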
6921SYSCALL_DEFINE5(perf_event_open,
6922 struct perf_event_attr __user *, attr_uptr,
6923 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6924{
6925 struct perf_event *group_leader = NULL, *output_event = NULL;
6926 struct perf_event *event, *sibling;
6927 struct perf_event_attr attr;
6928 struct perf_event_context *ctx;
6929 struct file *event_file = NULL;
6930 struct fd group = {NULL, 0};
6931 struct task_struct *task = NULL;
6932 struct pmu *pmu;
6933 int event_fd;
6934 int move_group = 0;
6935 int err;
6936
6937
6938 if (flags & ~PERF_FLAG_ALL)
6939 return -EINVAL;
6940
6941 err = perf_copy_attr(attr_uptr, &attr);
6942 if (err)
6943 return err;
6944
6945 if (!attr.exclude_kernel) {
6946 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6947 return -EACCES;
6948 }
6949
6950 if (attr.freq) {
6951 if (attr.sample_freq > sysctl_perf_event_sample_rate)
6952 return -EINVAL;
6953 }
6954
6955
6956
6957
6958
6959
6960
6961 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6962 return -EINVAL;
6963
6964 event_fd = get_unused_fd();
6965 if (event_fd < 0)
6966 return event_fd;
6967
6968 if (group_fd != -1) {
6969 err = perf_fget_light(group_fd, &group);
6970 if (err)
6971 goto err_fd;
6972 group_leader = group.file->private_data;
6973 if (flags & PERF_FLAG_FD_OUTPUT)
6974 output_event = group_leader;
6975 if (flags & PERF_FLAG_FD_NO_GROUP)
6976 group_leader = NULL;
6977 }
6978
6979 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6980 task = find_lively_task_by_vpid(pid);
6981 if (IS_ERR(task)) {
6982 err = PTR_ERR(task);
6983 goto err_group_fd;
6984 }
6985 }
6986
6987 get_online_cpus();
6988
6989 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6990 NULL, NULL);
6991 if (IS_ERR(event)) {
6992 err = PTR_ERR(event);
6993 goto err_task;
6994 }
6995
6996 if (flags & PERF_FLAG_PID_CGROUP) {
6997 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6998 if (err) {
6999 __free_event(event);
7000 goto err_task;
7001 }
7002 }
7003
7004 account_event(event);
7005
7006
7007
7008
7009
7010 pmu = event->pmu;
7011
7012 if (group_leader &&
7013 (is_software_event(event) != is_software_event(group_leader))) {
7014 if (is_software_event(event)) {
7015
7016
7017
7018
7019
7020
7021
7022
7023 pmu = group_leader->pmu;
7024 } else if (is_software_event(group_leader) &&
7025 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
7026
7027
7028
7029
7030
7031 move_group = 1;
7032 }
7033 }
7034
7035
7036
7037
7038 ctx = find_get_context(pmu, task, event->cpu);
7039 if (IS_ERR(ctx)) {
7040 err = PTR_ERR(ctx);
7041 goto err_alloc;
7042 }
7043
7044 if (task) {
7045 put_task_struct(task);
7046 task = NULL;
7047 }
7048
7049
7050
7051
7052 if (group_leader) {
7053 err = -EINVAL;
7054
7055
7056
7057
7058
7059 if (group_leader->group_leader != group_leader)
7060 goto err_context;
7061
7062
7063
7064
7065 if (move_group) {
7066 if (group_leader->ctx->type != ctx->type)
7067 goto err_context;
7068 } else {
7069 if (group_leader->ctx != ctx)
7070 goto err_context;
7071 }
7072
7073
7074
7075
7076 if (attr.exclusive || attr.pinned)
7077 goto err_context;
7078 }
7079
7080 if (output_event) {
7081 err = perf_event_set_output(event, output_event);
7082 if (err)
7083 goto err_context;
7084 }
7085
7086 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
7087 if (IS_ERR(event_file)) {
7088 err = PTR_ERR(event_file);
7089 goto err_context;
7090 }
7091
7092 if (move_group) {
7093 struct perf_event_context *gctx = group_leader->ctx;
7094
7095 mutex_lock(&gctx->mutex);
7096 perf_remove_from_context(group_leader);
7097
7098
7099
7100
7101
7102
7103 perf_event__state_init(group_leader);
7104 list_for_each_entry(sibling, &group_leader->sibling_list,
7105 group_entry) {
7106 perf_remove_from_context(sibling);
7107 perf_event__state_init(sibling);
7108 put_ctx(gctx);
7109 }
7110 mutex_unlock(&gctx->mutex);
7111 put_ctx(gctx);
7112 }
7113
7114 WARN_ON_ONCE(ctx->parent_ctx);
7115 mutex_lock(&ctx->mutex);
7116
7117 if (move_group) {
7118 synchronize_rcu();
7119 perf_install_in_context(ctx, group_leader, event->cpu);
7120 get_ctx(ctx);
7121 list_for_each_entry(sibling, &group_leader->sibling_list,
7122 group_entry) {
7123 perf_install_in_context(ctx, sibling, event->cpu);
7124 get_ctx(ctx);
7125 }
7126 }
7127
7128 perf_install_in_context(ctx, event, event->cpu);
7129 ++ctx->generation;
7130 perf_unpin_context(ctx);
7131 mutex_unlock(&ctx->mutex);
7132
7133 put_online_cpus();
7134
7135 event->owner = current;
7136
7137 mutex_lock(&current->perf_event_mutex);
7138 list_add_tail(&event->owner_entry, &current->perf_event_list);
7139 mutex_unlock(&current->perf_event_mutex);
7140
7141
7142
7143
7144 perf_event__header_size(event);
7145 perf_event__id_header_size(event);
7146
7147
7148
7149
7150
7151
7152
7153 fdput(group);
7154 fd_install(event_fd, event_file);
7155 return event_fd;
7156
7157err_context:
7158 perf_unpin_context(ctx);
7159 put_ctx(ctx);
7160err_alloc:
7161 free_event(event);
7162err_task:
7163 put_online_cpus();
7164 if (task)
7165 put_task_struct(task);
7166err_group_fd:
7167 fdput(group);
7168err_fd:
7169 put_unused_fd(event_fd);
7170 return err;
7171}
7172
7173
7174
7175
7176
7177
7178
7179
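/*
 * perf_event_create_kernel_counter - in-kernel counterpart to the
 * perf_event_open() syscall
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for per-cpu)
 * @overflow_handler: callback to trigger when we hit the event
 * @context: opaque data passed to the overflow handler
 */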
7180struct perf_event *
7181perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
7182 struct task_struct *task,
7183 perf_overflow_handler_t overflow_handler,
7184 void *context)
7185{
7186 struct perf_event_context *ctx;
7187 struct perf_event *event;
7188 int err;
7189
7190
7191
7192
7193
7194 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7195 overflow_handler, context);
7196 if (IS_ERR(event)) {
7197 err = PTR_ERR(event);
7198 goto err;
7199 }
7200
7201 account_event(event);
7202
7203 ctx = find_get_context(event->pmu, task, cpu);
7204 if (IS_ERR(ctx)) {
7205 err = PTR_ERR(ctx);
7206 goto err_free;
7207 }
7208
7209 WARN_ON_ONCE(ctx->parent_ctx);
7210 mutex_lock(&ctx->mutex);
7211 perf_install_in_context(ctx, event, cpu);
7212 ++ctx->generation;
7213 perf_unpin_context(ctx);
7214 mutex_unlock(&ctx->mutex);
7215
7216 return event;
7217
7218err_free:
7219 free_event(event);
7220err:
7221 return ERR_PTR(err);
7222}
7223EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
7224
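/*
 * Move every event on @pmu's per-cpu context from @src_cpu to @dst_cpu,
 * e.g. when a PMU driver has to migrate its events on cpu hotplug.
 */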
7225void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7226{
7227 struct perf_event_context *src_ctx;
7228 struct perf_event_context *dst_ctx;
7229 struct perf_event *event, *tmp;
7230 LIST_HEAD(events);
7231
7232 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7233 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7234
7235 mutex_lock(&src_ctx->mutex);
7236 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7237 event_entry) {
7238 perf_remove_from_context(event);
7239 unaccount_event_cpu(event, src_cpu);
7240 put_ctx(src_ctx);
7241 list_add(&event->migrate_entry, &events);
7242 }
7243 mutex_unlock(&src_ctx->mutex);
7244
7245 synchronize_rcu();
7246
7247 mutex_lock(&dst_ctx->mutex);
7248 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7249 list_del(&event->migrate_entry);
7250 if (event->state >= PERF_EVENT_STATE_OFF)
7251 event->state = PERF_EVENT_STATE_INACTIVE;
7252 account_event_cpu(event, dst_cpu);
7253 perf_install_in_context(dst_ctx, event, dst_cpu);
7254 get_ctx(dst_ctx);
7255 }
7256 mutex_unlock(&dst_ctx->mutex);
7257}
7258EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
7259
7260static void sync_child_event(struct perf_event *child_event,
7261 struct task_struct *child)
7262{
7263 struct perf_event *parent_event = child_event->parent;
7264 u64 child_val;
7265
7266 if (child_event->attr.inherit_stat)
7267 perf_event_read_event(child_event, child);
7268
7269 child_val = perf_event_count(child_event);
7270
7271
7272
7273
7274 atomic64_add(child_val, &parent_event->child_count);
7275 atomic64_add(child_event->total_time_enabled,
7276 &parent_event->child_total_time_enabled);
7277 atomic64_add(child_event->total_time_running,
7278 &parent_event->child_total_time_running);
7279
7280
7281
7282
7283 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7284 mutex_lock(&parent_event->child_mutex);
7285 list_del_init(&child_event->child_list);
7286 mutex_unlock(&parent_event->child_mutex);
7287
7288
7289
7290
7291
7292 put_event(parent_event);
7293}
7294
7295static void
7296__perf_event_exit_task(struct perf_event *child_event,
7297 struct perf_event_context *child_ctx,
7298 struct task_struct *child)
7299{
7300 if (child_event->parent) {
7301 raw_spin_lock_irq(&child_ctx->lock);
7302 perf_group_detach(child_event);
7303 raw_spin_unlock_irq(&child_ctx->lock);
7304 }
7305
7306 perf_remove_from_context(child_event);
7307
7308
7309
7310
7311
7312
7313 if (child_event->parent) {
7314 sync_child_event(child_event, child);
7315 free_event(child_event);
7316 }
7317}
7318
7319static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7320{
7321 struct perf_event *child_event, *tmp;
7322 struct perf_event_context *child_ctx;
7323 unsigned long flags;
7324
7325 if (likely(!child->perf_event_ctxp[ctxn])) {
7326 perf_event_task(child, NULL, 0);
7327 return;
7328 }
7329
7330 local_irq_save(flags);
7331
7332
7333
7334
7335
7336
7337 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
7338
7339
7340
7341
7342
7343
7344 raw_spin_lock(&child_ctx->lock);
7345 task_ctx_sched_out(child_ctx);
7346 child->perf_event_ctxp[ctxn] = NULL;
7347
7348
7349
7350
7351
7352 unclone_ctx(child_ctx);
7353 update_context_time(child_ctx);
7354 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7355
7356
7357
7358
7359
7360
7361 perf_event_task(child, child_ctx, 0);
7362
7363
7364
7365
7366
7367
7368
7369
7370
7371
7372
7373 mutex_lock(&child_ctx->mutex);
7374
7375again:
7376 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
7377 group_entry)
7378 __perf_event_exit_task(child_event, child_ctx, child);
7379
7380 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
7381 group_entry)
7382 __perf_event_exit_task(child_event, child_ctx, child);
7383
7384
7385
7386
7387
7388
7389 if (!list_empty(&child_ctx->pinned_groups) ||
7390 !list_empty(&child_ctx->flexible_groups))
7391 goto again;
7392
7393 mutex_unlock(&child_ctx->mutex);
7394
7395 put_ctx(child_ctx);
7396}
7397
7398
7399
7400
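/*
 * When a child task exits, feed back event values to parent events.
 */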
7401void perf_event_exit_task(struct task_struct *child)
7402{
7403 struct perf_event *event, *tmp;
7404 int ctxn;
7405
7406 mutex_lock(&child->perf_event_mutex);
7407 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7408 owner_entry) {
7409 list_del_init(&event->owner_entry);
7410
7411
7412
7413
7414
7415
7416 smp_wmb();
7417 event->owner = NULL;
7418 }
7419 mutex_unlock(&child->perf_event_mutex);
7420
7421 for_each_task_context_nr(ctxn)
7422 perf_event_exit_task_context(child, ctxn);
7423}
7424
7425static void perf_free_event(struct perf_event *event,
7426 struct perf_event_context *ctx)
7427{
7428 struct perf_event *parent = event->parent;
7429
7430 if (WARN_ON_ONCE(!parent))
7431 return;
7432
7433 mutex_lock(&parent->child_mutex);
7434 list_del_init(&event->child_list);
7435 mutex_unlock(&parent->child_mutex);
7436
7437 put_event(parent);
7438
7439 perf_group_detach(event);
7440 list_del_event(event, ctx);
7441 free_event(event);
7442}
7443
7444
7445
7446
7447
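/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_context() below, used by fork() in case of failure.
 */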
7448void perf_event_free_task(struct task_struct *task)
7449{
7450 struct perf_event_context *ctx;
7451 struct perf_event *event, *tmp;
7452 int ctxn;
7453
7454 for_each_task_context_nr(ctxn) {
7455 ctx = task->perf_event_ctxp[ctxn];
7456 if (!ctx)
7457 continue;
7458
7459 mutex_lock(&ctx->mutex);
7460again:
7461 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7462 group_entry)
7463 perf_free_event(event, ctx);
7464
7465 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7466 group_entry)
7467 perf_free_event(event, ctx);
7468
7469 if (!list_empty(&ctx->pinned_groups) ||
7470 !list_empty(&ctx->flexible_groups))
7471 goto again;
7472
7473 mutex_unlock(&ctx->mutex);
7474
7475 put_ctx(ctx);
7476 }
7477}
7478
7479void perf_event_delayed_put(struct task_struct *task)
7480{
7481 int ctxn;
7482
7483 for_each_task_context_nr(ctxn)
7484 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7485}
7486
7487
7488
7489
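/*
 * Inherit an event from parent task to child task: allocate a matching
 * child event, add it to the child context and link it on the parent's
 * child_list so counts can be folded back on exit.
 */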
7490static struct perf_event *
7491inherit_event(struct perf_event *parent_event,
7492 struct task_struct *parent,
7493 struct perf_event_context *parent_ctx,
7494 struct task_struct *child,
7495 struct perf_event *group_leader,
7496 struct perf_event_context *child_ctx)
7497{
7498 struct perf_event *child_event;
7499 unsigned long flags;
7500
7501
7502
7503
7504
7505
7506
7507 if (parent_event->parent)
7508 parent_event = parent_event->parent;
7509
7510 child_event = perf_event_alloc(&parent_event->attr,
7511 parent_event->cpu,
7512 child,
7513 group_leader, parent_event,
7514 NULL, NULL);
7515 if (IS_ERR(child_event))
7516 return child_event;
7517
7518 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7519 free_event(child_event);
7520 return NULL;
7521 }
7522
7523 get_ctx(child_ctx);
7524
7525
7526
7527
7528
7529
7530 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7531 child_event->state = PERF_EVENT_STATE_INACTIVE;
7532 else
7533 child_event->state = PERF_EVENT_STATE_OFF;
7534
7535 if (parent_event->attr.freq) {
7536 u64 sample_period = parent_event->hw.sample_period;
7537 struct hw_perf_event *hwc = &child_event->hw;
7538
7539 hwc->sample_period = sample_period;
7540 hwc->last_period = sample_period;
7541
7542 local64_set(&hwc->period_left, sample_period);
7543 }
7544
7545 child_event->ctx = child_ctx;
7546 child_event->overflow_handler = parent_event->overflow_handler;
7547 child_event->overflow_handler_context
7548 = parent_event->overflow_handler_context;
7549
7550
7551
7552
7553 perf_event__header_size(child_event);
7554 perf_event__id_header_size(child_event);
7555
7556
7557
7558
7559 raw_spin_lock_irqsave(&child_ctx->lock, flags);
7560 add_event_to_ctx(child_event, child_ctx);
7561 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7562
7563
7564
7565
7566 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7567 mutex_lock(&parent_event->child_mutex);
7568 list_add_tail(&child_event->child_list, &parent_event->child_list);
7569 mutex_unlock(&parent_event->child_mutex);
7570
7571 return child_event;
7572}
7573
7574static int inherit_group(struct perf_event *parent_event,
7575 struct task_struct *parent,
7576 struct perf_event_context *parent_ctx,
7577 struct task_struct *child,
7578 struct perf_event_context *child_ctx)
7579{
7580 struct perf_event *leader;
7581 struct perf_event *sub;
7582 struct perf_event *child_ctr;
7583
7584 leader = inherit_event(parent_event, parent, parent_ctx,
7585 child, NULL, child_ctx);
7586 if (IS_ERR(leader))
7587 return PTR_ERR(leader);
7588 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7589 child_ctr = inherit_event(sub, parent, parent_ctx,
7590 child, leader, child_ctx);
7591 if (IS_ERR(child_ctr))
7592 return PTR_ERR(child_ctr);
7593 }
7594 return 0;
7595}
7596
7597static int
7598inherit_task_group(struct perf_event *event, struct task_struct *parent,
7599 struct perf_event_context *parent_ctx,
7600 struct task_struct *child, int ctxn,
7601 int *inherited_all)
7602{
7603 int ret;
7604 struct perf_event_context *child_ctx;
7605
7606 if (!event->attr.inherit) {
7607 *inherited_all = 0;
7608 return 0;
7609 }
7610
7611 child_ctx = child->perf_event_ctxp[ctxn];
7612 if (!child_ctx) {
7613
7614
7615
7616
7617
7618
7619
7620 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
7621 if (!child_ctx)
7622 return -ENOMEM;
7623
7624 child->perf_event_ctxp[ctxn] = child_ctx;
7625 }
7626
7627 ret = inherit_group(event, parent, parent_ctx,
7628 child, child_ctx);
7629
7630 if (ret)
7631 *inherited_all = 0;
7632
7633 return ret;
7634}
7635
7636
7637
7638
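/*
 * Initialize the perf_event context in task_struct for one context
 * number, cloning the parent's inheritable events into the child.
 */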
7639int perf_event_init_context(struct task_struct *child, int ctxn)
7640{
7641 struct perf_event_context *child_ctx, *parent_ctx;
7642 struct perf_event_context *cloned_ctx;
7643 struct perf_event *event;
7644 struct task_struct *parent = current;
7645 int inherited_all = 1;
7646 unsigned long flags;
7647 int ret = 0;
7648
7649 if (likely(!parent->perf_event_ctxp[ctxn]))
7650 return 0;
7651
7652
7653
7654
7655
7656 parent_ctx = perf_pin_task_context(parent, ctxn);
7657
7658
7659
7660
7661
7662
7663
7664
7665
7666
7667
7668
7669 mutex_lock(&parent_ctx->mutex);
7670
7671
7672
7673
7674
7675 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7676 ret = inherit_task_group(event, parent, parent_ctx,
7677 child, ctxn, &inherited_all);
7678 if (ret)
7679 break;
7680 }
7681
7682
7683
7684
7685
7686
7687 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7688 parent_ctx->rotate_disable = 1;
7689 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7690
7691 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7692 ret = inherit_task_group(event, parent, parent_ctx,
7693 child, ctxn, &inherited_all);
7694 if (ret)
7695 break;
7696 }
7697
7698 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7699 parent_ctx->rotate_disable = 0;
7700
7701 child_ctx = child->perf_event_ctxp[ctxn];
7702
7703 if (child_ctx && inherited_all) {
7704
7705
7706
7707
7708
7709
7710
7711 cloned_ctx = parent_ctx->parent_ctx;
7712 if (cloned_ctx) {
7713 child_ctx->parent_ctx = cloned_ctx;
7714 child_ctx->parent_gen = parent_ctx->parent_gen;
7715 } else {
7716 child_ctx->parent_ctx = parent_ctx;
7717 child_ctx->parent_gen = parent_ctx->generation;
7718 }
7719 get_ctx(child_ctx->parent_ctx);
7720 }
7721
7722 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7723 mutex_unlock(&parent_ctx->mutex);
7724
7725 perf_unpin_context(parent_ctx);
7726 put_ctx(parent_ctx);
7727
7728 return ret;
7729}
7730
7731
7732
7733
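/*
 * Initialize perf state for a new task and inherit counters from the
 * parent for every context number.
 */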
7734int perf_event_init_task(struct task_struct *child)
7735{
7736 int ctxn, ret;
7737
7738 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7739 mutex_init(&child->perf_event_mutex);
7740 INIT_LIST_HEAD(&child->perf_event_list);
7741
7742 for_each_task_context_nr(ctxn) {
7743 ret = perf_event_init_context(child, ctxn);
7744 if (ret)
7745 return ret;
7746 }
7747
7748 return 0;
7749}
7750
7751static void __init perf_event_init_all_cpus(void)
7752{
7753 struct swevent_htable *swhash;
7754 int cpu;
7755
7756 for_each_possible_cpu(cpu) {
7757 swhash = &per_cpu(swevent_htable, cpu);
7758 mutex_init(&swhash->hlist_mutex);
7759 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7760 }
7761}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}
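
/*
 * Runs on the outgoing CPU (via smp_call_function_single(), so with IRQs
 * disabled): stop context rotation and detach every pinned and flexible
 * event from the per-CPU context.
 */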

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
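
/*
 * CPU hotplug (CPU_DOWN_PREPARE / CPU_UP_CANCELED): release the swevent
 * hash list and tear down the per-CPU contexts of every registered PMU
 * for the CPU going away.
 */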

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
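
/*
 * Reboot path: take perf offline on every online CPU so that PMU and
 * breakpoint state is quiesced before the machine reboots (or a kexec'd
 * kernel takes over).
 */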
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
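
/*
 * Boot-time initialization: set up per-CPU state, register the built-in
 * software and clock PMUs plus tracepoint support, then hook the CPU
 * hotplug and reboot notifiers.  This runs early during boot (from
 * start_kernel()), before any events can be created.
 */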

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location.  IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
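
/*
 * Cgroup controller for perf ("perf_event"): tasks are grouped so that
 * events can be restricted to, and accounted against, a cgroup.  A
 * typical use from userspace looks roughly like (illustrative only):
 *
 *	# mount -t cgroup -o perf_event none /sys/fs/cgroup/perf_event
 *	# mkdir /sys/fs/cgroup/perf_event/mygroup
 *	# echo $$ > /sys/fs/cgroup/perf_event/mygroup/tasks
 *	# perf stat -e cycles -a -G mygroup -- sleep 1
 */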

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}
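
/*
 * Runs on the task's CPU via task_function_call(): force a cgroup switch
 * (out of the old cgroup's events, into the new one's) for that task.
 */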

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, css, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */